Diffstat (limited to 'db-4.8.30/test')
-rw-r--r--db-4.8.30/test/README147
-rw-r--r--db-4.8.30/test/TESTS3199
-rw-r--r--db-4.8.30/test/archive.tcl255
-rw-r--r--db-4.8.30/test/backup.tcl225
-rw-r--r--db-4.8.30/test/bigfile001.tcl79
-rw-r--r--db-4.8.30/test/bigfile002.tcl45
-rw-r--r--db-4.8.30/test/byteorder.tcl33
-rw-r--r--db-4.8.30/test/conscript.tcl123
-rw-r--r--db-4.8.30/test/db_reptest.tcl778
-rw-r--r--db-4.8.30/test/dbm.tcl127
-rw-r--r--db-4.8.30/test/dbscript.tcl358
-rw-r--r--db-4.8.30/test/ddoyscript.tcl171
-rw-r--r--db-4.8.30/test/ddscript.tcl43
-rw-r--r--db-4.8.30/test/dead001.tcl86
-rw-r--r--db-4.8.30/test/dead002.tcl80
-rw-r--r--db-4.8.30/test/dead003.tcl98
-rw-r--r--db-4.8.30/test/dead004.tcl107
-rw-r--r--db-4.8.30/test/dead005.tcl88
-rw-r--r--db-4.8.30/test/dead006.tcl15
-rw-r--r--db-4.8.30/test/dead007.tcl35
-rw-r--r--db-4.8.30/test/env001.tcl145
-rw-r--r--db-4.8.30/test/env002.tcl155
-rw-r--r--db-4.8.30/test/env003.tcl148
-rw-r--r--db-4.8.30/test/env004.tcl94
-rw-r--r--db-4.8.30/test/env005.tcl51
-rw-r--r--db-4.8.30/test/env006.tcl90
-rw-r--r--db-4.8.30/test/env007.tcl701
-rw-r--r--db-4.8.30/test/env008.tcl72
-rw-r--r--db-4.8.30/test/env009.tcl81
-rw-r--r--db-4.8.30/test/env010.tcl49
-rw-r--r--db-4.8.30/test/env011.tcl38
-rw-r--r--db-4.8.30/test/env012.tcl393
-rw-r--r--db-4.8.30/test/env013.tcl84
-rw-r--r--db-4.8.30/test/env014.tcl117
-rw-r--r--db-4.8.30/test/env015.tcl85
-rw-r--r--db-4.8.30/test/env016.tcl135
-rw-r--r--db-4.8.30/test/env017.tcl582
-rw-r--r--db-4.8.30/test/env018.tcl57
-rw-r--r--db-4.8.30/test/envscript.tcl100
-rw-r--r--db-4.8.30/test/fop001.tcl243
-rw-r--r--db-4.8.30/test/fop002.tcl135
-rw-r--r--db-4.8.30/test/fop003.tcl94
-rw-r--r--db-4.8.30/test/fop004.tcl260
-rw-r--r--db-4.8.30/test/fop005.tcl147
-rw-r--r--db-4.8.30/test/fop006.tcl185
-rw-r--r--db-4.8.30/test/fop007.tcl21
-rw-r--r--db-4.8.30/test/fop008.tcl15
-rw-r--r--db-4.8.30/test/fopscript.tcl70
-rw-r--r--db-4.8.30/test/foputils.tcl484
-rw-r--r--db-4.8.30/test/hsearch.tcl50
-rw-r--r--db-4.8.30/test/include.tcl32
-rw-r--r--db-4.8.30/test/join.tcl454
-rw-r--r--db-4.8.30/test/lock001.tcl121
-rw-r--r--db-4.8.30/test/lock002.tcl154
-rw-r--r--db-4.8.30/test/lock003.tcl100
-rw-r--r--db-4.8.30/test/lock004.tcl28
-rw-r--r--db-4.8.30/test/lock005.tcl176
-rw-r--r--db-4.8.30/test/lock006.tcl186
-rw-r--r--db-4.8.30/test/lockscript.tcl116
-rw-r--r--db-4.8.30/test/log001.tcl143
-rw-r--r--db-4.8.30/test/log002.tcl101
-rw-r--r--db-4.8.30/test/log003.tcl143
-rw-r--r--db-4.8.30/test/log004.tcl51
-rw-r--r--db-4.8.30/test/log005.tcl117
-rw-r--r--db-4.8.30/test/log006.tcl230
-rw-r--r--db-4.8.30/test/log007.tcl110
-rw-r--r--db-4.8.30/test/log008.tcl46
-rw-r--r--db-4.8.30/test/log008script.tcl82
-rw-r--r--db-4.8.30/test/log009.tcl122
-rw-r--r--db-4.8.30/test/logtrack.list60
-rw-r--r--db-4.8.30/test/logtrack.tcl142
-rw-r--r--db-4.8.30/test/mdbscript.tcl402
-rw-r--r--db-4.8.30/test/memp001.tcl202
-rw-r--r--db-4.8.30/test/memp002.tcl67
-rw-r--r--db-4.8.30/test/memp003.tcl159
-rw-r--r--db-4.8.30/test/memp004.tcl82
-rw-r--r--db-4.8.30/test/mpoolscript.tcl170
-rw-r--r--db-4.8.30/test/mut001.tcl110
-rw-r--r--db-4.8.30/test/mut002.tcl52
-rw-r--r--db-4.8.30/test/mut002script.tcl39
-rw-r--r--db-4.8.30/test/mut003.tcl59
-rw-r--r--db-4.8.30/test/ndbm.tcl143
-rw-r--r--db-4.8.30/test/parallel.tcl398
-rw-r--r--db-4.8.30/test/plat001.tcl75
-rw-r--r--db-4.8.30/test/recd001.tcl258
-rw-r--r--db-4.8.30/test/recd002.tcl108
-rw-r--r--db-4.8.30/test/recd003.tcl125
-rw-r--r--db-4.8.30/test/recd004.tcl103
-rw-r--r--db-4.8.30/test/recd005.tcl241
-rw-r--r--db-4.8.30/test/recd006.tcl268
-rw-r--r--db-4.8.30/test/recd007.tcl1069
-rw-r--r--db-4.8.30/test/recd008.tcl226
-rw-r--r--db-4.8.30/test/recd009.tcl179
-rw-r--r--db-4.8.30/test/recd010.tcl256
-rw-r--r--db-4.8.30/test/recd011.tcl135
-rw-r--r--db-4.8.30/test/recd012.tcl433
-rw-r--r--db-4.8.30/test/recd013.tcl291
-rw-r--r--db-4.8.30/test/recd014.tcl446
-rw-r--r--db-4.8.30/test/recd015.tcl151
-rw-r--r--db-4.8.30/test/recd016.tcl180
-rw-r--r--db-4.8.30/test/recd017.tcl157
-rw-r--r--db-4.8.30/test/recd018.tcl109
-rw-r--r--db-4.8.30/test/recd019.tcl122
-rw-r--r--db-4.8.30/test/recd020.tcl81
-rw-r--r--db-4.8.30/test/recd021.tcl278
-rw-r--r--db-4.8.30/test/recd022.tcl136
-rw-r--r--db-4.8.30/test/recd023.tcl91
-rw-r--r--db-4.8.30/test/recd024.tcl81
-rw-r--r--db-4.8.30/test/recd15scr.tcl73
-rw-r--r--db-4.8.30/test/recdscript.tcl37
-rw-r--r--db-4.8.30/test/rep001.tcl229
-rw-r--r--db-4.8.30/test/rep002.tcl330
-rw-r--r--db-4.8.30/test/rep003.tcl304
-rw-r--r--db-4.8.30/test/rep005.tcl364
-rw-r--r--db-4.8.30/test/rep006.tcl213
-rw-r--r--db-4.8.30/test/rep007.tcl265
-rw-r--r--db-4.8.30/test/rep008.tcl146
-rw-r--r--db-4.8.30/test/rep009.tcl198
-rw-r--r--db-4.8.30/test/rep010.tcl266
-rw-r--r--db-4.8.30/test/rep011.tcl195
-rw-r--r--db-4.8.30/test/rep012.tcl292
-rw-r--r--db-4.8.30/test/rep013.tcl299
-rw-r--r--db-4.8.30/test/rep014.tcl202
-rw-r--r--db-4.8.30/test/rep015.tcl321
-rw-r--r--db-4.8.30/test/rep016.tcl293
-rw-r--r--db-4.8.30/test/rep017.tcl268
-rw-r--r--db-4.8.30/test/rep017script.tcl83
-rw-r--r--db-4.8.30/test/rep018.tcl193
-rw-r--r--db-4.8.30/test/rep018script.tcl98
-rw-r--r--db-4.8.30/test/rep019.tcl184
-rw-r--r--db-4.8.30/test/rep020.tcl331
-rw-r--r--db-4.8.30/test/rep021.tcl330
-rw-r--r--db-4.8.30/test/rep022.tcl316
-rw-r--r--db-4.8.30/test/rep023.tcl205
-rw-r--r--db-4.8.30/test/rep024.tcl236
-rw-r--r--db-4.8.30/test/rep025.tcl237
-rw-r--r--db-4.8.30/test/rep026.tcl294
-rw-r--r--db-4.8.30/test/rep027.tcl189
-rw-r--r--db-4.8.30/test/rep028.tcl248
-rw-r--r--db-4.8.30/test/rep029.tcl292
-rw-r--r--db-4.8.30/test/rep030.tcl388
-rw-r--r--db-4.8.30/test/rep031.tcl345
-rw-r--r--db-4.8.30/test/rep032.tcl200
-rw-r--r--db-4.8.30/test/rep033.tcl273
-rw-r--r--db-4.8.30/test/rep034.tcl393
-rw-r--r--db-4.8.30/test/rep035.tcl294
-rw-r--r--db-4.8.30/test/rep035script.tcl81
-rw-r--r--db-4.8.30/test/rep036.tcl209
-rw-r--r--db-4.8.30/test/rep036script.tcl125
-rw-r--r--db-4.8.30/test/rep037.tcl248
-rw-r--r--db-4.8.30/test/rep038.tcl293
-rw-r--r--db-4.8.30/test/rep039.tcl472
-rw-r--r--db-4.8.30/test/rep040.tcl249
-rw-r--r--db-4.8.30/test/rep040script.tcl74
-rw-r--r--db-4.8.30/test/rep041.tcl231
-rw-r--r--db-4.8.30/test/rep042.tcl202
-rw-r--r--db-4.8.30/test/rep042script.tcl78
-rw-r--r--db-4.8.30/test/rep043.tcl246
-rw-r--r--db-4.8.30/test/rep043script.tcl125
-rw-r--r--db-4.8.30/test/rep044.tcl287
-rw-r--r--db-4.8.30/test/rep045.tcl286
-rw-r--r--db-4.8.30/test/rep045script.tcl142
-rw-r--r--db-4.8.30/test/rep046.tcl339
-rw-r--r--db-4.8.30/test/rep047.tcl266
-rw-r--r--db-4.8.30/test/rep048.tcl186
-rw-r--r--db-4.8.30/test/rep048script.tcl84
-rw-r--r--db-4.8.30/test/rep049.tcl240
-rw-r--r--db-4.8.30/test/rep050.tcl362
-rw-r--r--db-4.8.30/test/rep051.tcl243
-rw-r--r--db-4.8.30/test/rep052.tcl252
-rw-r--r--db-4.8.30/test/rep053.tcl227
-rw-r--r--db-4.8.30/test/rep054.tcl275
-rw-r--r--db-4.8.30/test/rep055.tcl242
-rw-r--r--db-4.8.30/test/rep058.tcl149
-rw-r--r--db-4.8.30/test/rep060.tcl346
-rw-r--r--db-4.8.30/test/rep061.tcl443
-rw-r--r--db-4.8.30/test/rep062.tcl321
-rw-r--r--db-4.8.30/test/rep063.tcl397
-rw-r--r--db-4.8.30/test/rep064.tcl168
-rw-r--r--db-4.8.30/test/rep065.tcl444
-rw-r--r--db-4.8.30/test/rep065script.tcl416
-rw-r--r--db-4.8.30/test/rep066.tcl269
-rw-r--r--db-4.8.30/test/rep067.tcl395
-rw-r--r--db-4.8.30/test/rep068.tcl206
-rw-r--r--db-4.8.30/test/rep069.tcl295
-rw-r--r--db-4.8.30/test/rep070.tcl181
-rw-r--r--db-4.8.30/test/rep071.tcl166
-rw-r--r--db-4.8.30/test/rep072.tcl211
-rw-r--r--db-4.8.30/test/rep073.tcl193
-rw-r--r--db-4.8.30/test/rep074.tcl197
-rw-r--r--db-4.8.30/test/rep075.tcl551
-rw-r--r--db-4.8.30/test/rep076.tcl203
-rw-r--r--db-4.8.30/test/rep077.tcl158
-rw-r--r--db-4.8.30/test/rep078.tcl345
-rw-r--r--db-4.8.30/test/rep078script.tcl123
-rw-r--r--db-4.8.30/test/rep079.tcl329
-rw-r--r--db-4.8.30/test/rep080.tcl189
-rw-r--r--db-4.8.30/test/rep081.tcl288
-rw-r--r--db-4.8.30/test/rep082.tcl209
-rw-r--r--db-4.8.30/test/rep083.tcl161
-rw-r--r--db-4.8.30/test/rep084.tcl142
-rw-r--r--db-4.8.30/test/rep085.tcl154
-rw-r--r--db-4.8.30/test/rep086.tcl146
-rw-r--r--db-4.8.30/test/rep087.tcl221
-rw-r--r--db-4.8.30/test/rep088.tcl248
-rw-r--r--db-4.8.30/test/repmgr001.tcl43
-rw-r--r--db-4.8.30/test/repmgr002.tcl44
-rw-r--r--db-4.8.30/test/repmgr003.tcl43
-rw-r--r--db-4.8.30/test/repmgr004.tcl43
-rw-r--r--db-4.8.30/test/repmgr005.tcl43
-rw-r--r--db-4.8.30/test/repmgr006.tcl43
-rw-r--r--db-4.8.30/test/repmgr007.tcl160
-rw-r--r--db-4.8.30/test/repmgr008.tcl43
-rw-r--r--db-4.8.30/test/repmgr009.tcl184
-rw-r--r--db-4.8.30/test/repmgr010.tcl181
-rw-r--r--db-4.8.30/test/repmgr011.tcl129
-rw-r--r--db-4.8.30/test/repmgr012.tcl136
-rw-r--r--db-4.8.30/test/repmgr013.tcl129
-rw-r--r--db-4.8.30/test/repmgr014.tcl44
-rw-r--r--db-4.8.30/test/repmgr015.tcl46
-rw-r--r--db-4.8.30/test/repmgr016.tcl45
-rw-r--r--db-4.8.30/test/repmgr017.tcl169
-rw-r--r--db-4.8.30/test/repmgr018.tcl153
-rw-r--r--db-4.8.30/test/repmgr019.tcl44
-rw-r--r--db-4.8.30/test/repmgr022.tcl105
-rw-r--r--db-4.8.30/test/repmgr023.tcl109
-rw-r--r--db-4.8.30/test/repmgr024.tcl140
-rw-r--r--db-4.8.30/test/repmgr025.tcl173
-rw-r--r--db-4.8.30/test/repmgr026.tcl179
-rw-r--r--db-4.8.30/test/repmgr027.tcl216
-rw-r--r--db-4.8.30/test/repmgr028.tcl130
-rw-r--r--db-4.8.30/test/repmgr029.tcl122
-rw-r--r--db-4.8.30/test/repmgr030.tcl97
-rw-r--r--db-4.8.30/test/repmgr031.tcl209
-rw-r--r--db-4.8.30/test/repmgr032.tcl198
-rw-r--r--db-4.8.30/test/reputils.tcl2743
-rw-r--r--db-4.8.30/test/reputilsnoenv.tcl509
-rw-r--r--db-4.8.30/test/rpc001.tcl476
-rw-r--r--db-4.8.30/test/rpc002.tcl161
-rw-r--r--db-4.8.30/test/rpc003.tcl184
-rw-r--r--db-4.8.30/test/rpc004.tcl87
-rw-r--r--db-4.8.30/test/rpc005.tcl158
-rw-r--r--db-4.8.30/test/rpc006.tcl77
-rw-r--r--db-4.8.30/test/rsrc001.tcl215
-rw-r--r--db-4.8.30/test/rsrc002.tcl65
-rw-r--r--db-4.8.30/test/rsrc003.tcl178
-rw-r--r--db-4.8.30/test/rsrc004.tcl51
-rw-r--r--db-4.8.30/test/scr001/chk.code38
-rw-r--r--db-4.8.30/test/scr002/chk.def67
-rw-r--r--db-4.8.30/test/scr003/chk.define106
-rw-r--r--db-4.8.30/test/scr004/chk.javafiles30
-rw-r--r--db-4.8.30/test/scr005/chk.nl114
-rw-r--r--db-4.8.30/test/scr006/chk.offt57
-rw-r--r--db-4.8.30/test/scr007/chk.proto44
-rw-r--r--db-4.8.30/test/scr008/chk.pubdef189
-rw-r--r--db-4.8.30/test/scr009/chk.srcfiles50
-rw-r--r--db-4.8.30/test/scr010/chk.str42
-rw-r--r--db-4.8.30/test/scr010/spell.ok4903
-rw-r--r--db-4.8.30/test/scr011/chk.tags54
-rw-r--r--db-4.8.30/test/scr012/chk.vx_code85
-rw-r--r--db-4.8.30/test/scr013/chk.stats128
-rw-r--r--db-4.8.30/test/scr014/chk.err34
-rw-r--r--db-4.8.30/test/scr015/README36
-rw-r--r--db-4.8.30/test/scr015/TestConstruct01.cpp319
-rw-r--r--db-4.8.30/test/scr015/TestConstruct01.testerr0
-rw-r--r--db-4.8.30/test/scr015/TestConstruct01.testout27
-rw-r--r--db-4.8.30/test/scr015/TestGetSetMethods.cpp86
-rw-r--r--db-4.8.30/test/scr015/TestKeyRange.cpp168
-rw-r--r--db-4.8.30/test/scr015/TestKeyRange.testin8
-rw-r--r--db-4.8.30/test/scr015/TestKeyRange.testout19
-rw-r--r--db-4.8.30/test/scr015/TestLogc.cpp106
-rw-r--r--db-4.8.30/test/scr015/TestLogc.testout1
-rw-r--r--db-4.8.30/test/scr015/TestSimpleAccess.cpp66
-rw-r--r--db-4.8.30/test/scr015/TestSimpleAccess.testout3
-rw-r--r--db-4.8.30/test/scr015/TestTruncate.cpp83
-rw-r--r--db-4.8.30/test/scr015/TestTruncate.testout6
-rw-r--r--db-4.8.30/test/scr015/chk.cxxtests73
-rw-r--r--db-4.8.30/test/scr015/ignore4
-rw-r--r--db-4.8.30/test/scr015/testall32
-rw-r--r--db-4.8.30/test/scr015/testone122
-rw-r--r--db-4.8.30/test/scr016/Makefile18
-rw-r--r--db-4.8.30/test/scr016/README14
-rw-r--r--db-4.8.30/test/scr016/chk.bdb75
-rw-r--r--db-4.8.30/test/scr016/makenewtest.sh117
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/AppendRecnoTest.java209
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/AssociateTest.java252
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/CallbackTest.java159
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/ClosedDbTest.java86
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/DatabaseTest.java377
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/EncryptTest.java138
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/HashCompareTest.java125
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/LogCursorTest.java101
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/MultipleCursorTest.java239
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/PartialGetTest.java264
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/RepmgrConfigTest.java356
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/RepmgrElectionTest.java205
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/RepmgrStartupTest.java216
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/TestUtils.java234
-rw-r--r--db-4.8.30/test/scr016/src/com/sleepycat/db/test/VerboseConfigTest.java91
-rw-r--r--db-4.8.30/test/scr017/O.BH196
-rw-r--r--db-4.8.30/test/scr017/O.R196
-rw-r--r--db-4.8.30/test/scr017/chk.db18534
-rw-r--r--db-4.8.30/test/scr017/t.c194
-rw-r--r--db-4.8.30/test/scr018/chk.comma30
-rw-r--r--db-4.8.30/test/scr018/t.c53
-rw-r--r--db-4.8.30/test/scr019/chk.include49
-rw-r--r--db-4.8.30/test/scr020/chk.inc45
-rw-r--r--db-4.8.30/test/scr021/chk.flags174
-rw-r--r--db-4.8.30/test/scr021/t.c79
-rw-r--r--db-4.8.30/test/scr022/chk.rr34
-rw-r--r--db-4.8.30/test/scr023/chk.q26
-rw-r--r--db-4.8.30/test/scr023/q.c840
-rw-r--r--db-4.8.30/test/scr024/Makefile33
-rw-r--r--db-4.8.30/test/scr024/README51
-rw-r--r--db-4.8.30/test/scr024/build.xml415
-rw-r--r--db-4.8.30/test/scr024/chk.bdb81
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/MarshalledObject.java127
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/NullClassCatalog.java37
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/SerialBindingTest.java330
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/TestClassCatalog.java56
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/bind/test/BindingSpeedTest.java484
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/MarshalledObject.java136
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/TupleBindingTest.java426
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/TupleFormatTest.java927
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/TupleOrderingTest.java477
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/KeyRangeTest.java440
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/CollectionTest.java3048
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/DbTestUtil.java129
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/ForeignKeyTest.java342
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/IterDeadlockTest.java228
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/JoinTest.java225
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/NullTransactionRunner.java32
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/SecondaryDeadlockTest.java206
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestDataBinding.java33
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestEntity.java44
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestEntityBinding.java63
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestEnv.java130
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestKeyAssigner.java41
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestKeyCreator.java56
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestSR15721.java119
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestStore.java279
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TransactionTest.java838
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java97
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java177
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java154
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java70
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java.original72
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java245
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/db/util/DualTestCase.java88
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/BindingTest.java2425
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced0.java36
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced1.java260
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced2.java110
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced3.java176
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveCase.java205
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveClasses.java6818
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveClasses.java.original2855
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveTest.java255
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveTestBase.java438
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveTestInit.java53
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/ForeignKeyTest.java329
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/IndexTest.java874
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/JoinTest.java176
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/NegativeTest.java644
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/OperationTest.java1552
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/PersistTestUtils.java49
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/SequenceTest.java469
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/persist/test/SubclassIndexTest.java251
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/util/test/ExceptionWrapperTest.java134
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/util/test/FastOutputStreamTest.java66
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/util/test/PackedIntegerTest.java191
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/util/test/SharedTestUtils.java178
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/util/test/TestEnv.java142
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/util/test/TxnTestCase.java230
-rw-r--r--db-4.8.30/test/scr024/src/com/sleepycat/util/test/UtfTest.java163
-rw-r--r--db-4.8.30/test/scr025/TestMulti.cpp206
-rw-r--r--db-4.8.30/test/scr025/chk.cxxmulti69
-rw-r--r--db-4.8.30/test/scr025/ignore4
-rw-r--r--db-4.8.30/test/scr026/chk.method108
-rw-r--r--db-4.8.30/test/scr027/chk.javas10
-rw-r--r--db-4.8.30/test/scr028/chk.rtc29
-rw-r--r--db-4.8.30/test/scr028/t.c95
-rw-r--r--db-4.8.30/test/scr029/chk.get29
-rw-r--r--db-4.8.30/test/scr029/t.c209
-rw-r--r--db-4.8.30/test/scr030/chk.build124
-rw-r--r--db-4.8.30/test/scr031/chk.copy46
-rw-r--r--db-4.8.30/test/scr032/chk.rpc82
-rw-r--r--db-4.8.30/test/scr033/chk.codegen43
-rw-r--r--db-4.8.30/test/scr034/chk.mtx34
-rw-r--r--db-4.8.30/test/scr035/chk.osdir27
-rw-r--r--db-4.8.30/test/scr037/AllTestData.xml796
-rw-r--r--db-4.8.30/test/scr037/BTreeCursorTest.cs1192
-rw-r--r--db-4.8.30/test/scr037/BTreeDatabaseConfigTest.cs92
-rw-r--r--db-4.8.30/test/scr037/BTreeDatabaseTest.cs2309
-rw-r--r--db-4.8.30/test/scr037/Configuration.cs1107
-rw-r--r--db-4.8.30/test/scr037/CursorConfigTest.cs81
-rw-r--r--db-4.8.30/test/scr037/CursorTest.cs1459
-rw-r--r--db-4.8.30/test/scr037/DatabaseConfigTest.cs109
-rw-r--r--db-4.8.30/test/scr037/DatabaseEnvironmentConfigTest.cs299
-rw-r--r--db-4.8.30/test/scr037/DatabaseEnvironmentTest.cs1778
-rw-r--r--db-4.8.30/test/scr037/DatabaseExceptionTest.cs221
-rw-r--r--db-4.8.30/test/scr037/DatabaseTest.cs109
-rw-r--r--db-4.8.30/test/scr037/DotNetTest.csproj116
-rw-r--r--db-4.8.30/test/scr037/ForeignKeyTest.cs280
-rw-r--r--db-4.8.30/test/scr037/HashCursorTest.cs237
-rw-r--r--db-4.8.30/test/scr037/HashDatabaseConfigTest.cs85
-rw-r--r--db-4.8.30/test/scr037/HashDatabaseTest.cs466
-rw-r--r--db-4.8.30/test/scr037/JoinCursorTest.cs204
-rw-r--r--db-4.8.30/test/scr037/LockTest.cs116
-rw-r--r--db-4.8.30/test/scr037/LockingConfigTest.cs162
-rw-r--r--db-4.8.30/test/scr037/LogConfigTest.cs297
-rw-r--r--db-4.8.30/test/scr037/LogCursorTest.cs321
-rw-r--r--db-4.8.30/test/scr037/MPoolConfigTest.cs85
-rw-r--r--db-4.8.30/test/scr037/MutexConfigTest.cs82
-rw-r--r--db-4.8.30/test/scr037/MutexTest.cs126
-rw-r--r--db-4.8.30/test/scr037/QueueDatabaseConfigTest.cs88
-rw-r--r--db-4.8.30/test/scr037/QueueDatabaseTest.cs646
-rw-r--r--db-4.8.30/test/scr037/README1
-rw-r--r--db-4.8.30/test/scr037/RecnoCursorTest.cs257
-rw-r--r--db-4.8.30/test/scr037/RecnoDatabaseConfigTest.cs131
-rw-r--r--db-4.8.30/test/scr037/RecnoDatabaseTest.cs487
-rw-r--r--db-4.8.30/test/scr037/ReplicationConfigTest.cs215
-rw-r--r--db-4.8.30/test/scr037/ReplicationTest.cs748
-rw-r--r--db-4.8.30/test/scr037/SecondaryBTreeDatabaseConfigTest.cs115
-rw-r--r--db-4.8.30/test/scr037/SecondaryBTreeDatabaseTest.cs232
-rw-r--r--db-4.8.30/test/scr037/SecondaryCursorTest.cs1214
-rw-r--r--db-4.8.30/test/scr037/SecondaryDatabaseConfigTest.cs83
-rw-r--r--db-4.8.30/test/scr037/SecondaryDatabaseTest.cs518
-rw-r--r--db-4.8.30/test/scr037/SecondaryHashDatabaseConfigTest.cs109
-rw-r--r--db-4.8.30/test/scr037/SecondaryHashDatabaseTest.cs403
-rw-r--r--db-4.8.30/test/scr037/SecondaryQueueDatabaseConfigTest.cs109
-rw-r--r--db-4.8.30/test/scr037/SecondaryQueueDatabaseTest.cs227
-rw-r--r--db-4.8.30/test/scr037/SecondaryRecnoDatabaseConfigTest.cs119
-rw-r--r--db-4.8.30/test/scr037/SecondaryRecnoDatabaseTest.cs269
-rw-r--r--db-4.8.30/test/scr037/SequenceConfigTest.cs132
-rw-r--r--db-4.8.30/test/scr037/SequenceTest.cs406
-rw-r--r--db-4.8.30/test/scr037/TestException.cs49
-rw-r--r--db-4.8.30/test/scr037/TransactionConfigTest.cs73
-rw-r--r--db-4.8.30/test/scr037/TransactionTest.cs435
-rw-r--r--db-4.8.30/test/scr037/XMLReader.cs48
-rw-r--r--db-4.8.30/test/scr037/bdb4.7.dbbin0 -> 16384 bytes
-rw-r--r--db-4.8.30/test/scr038/chk.bdb10
-rw-r--r--db-4.8.30/test/scr038/data/17925.sql47
-rw-r--r--db-4.8.30/test/scr038/data/all_data_types.sql17
-rw-r--r--db-4.8.30/test/scr038/data/bigint_table.sql4
-rw-r--r--db-4.8.30/test/scr038/data/bin_table.sql3
-rw-r--r--db-4.8.30/test/scr038/data/bit_table.sql4
-rw-r--r--db-4.8.30/test/scr038/data/btree_table.sql6
-rw-r--r--db-4.8.30/test/scr038/data/cachesize.sql4
-rw-r--r--db-4.8.30/test/scr038/data/char_length.sql5
-rw-r--r--db-4.8.30/test/scr038/data/char_table.sql4
-rw-r--r--db-4.8.30/test/scr038/data/column_name_conflict_with_table.sql4
-rw-r--r--db-4.8.30/test/scr038/data/commented_dml.sql6
-rw-r--r--db-4.8.30/test/scr038/data/comments_at_the_end.sql6
-rw-r--r--db-4.8.30/test/scr038/data/constraints_name_conflict_with_column.sql13
-rw-r--r--db-4.8.30/test/scr038/data/constraints_on_nonexistent_column.sql14
-rw-r--r--db-4.8.30/test/scr038/data/constraints_on_nonexistent_table.sql9
-rw-r--r--db-4.8.30/test/scr038/data/database_without_name.sql9
-rw-r--r--db-4.8.30/test/scr038/data/decimal_table.sql4
-rw-r--r--db-4.8.30/test/scr038/data/dml.sql6
-rw-r--r--db-4.8.30/test/scr038/data/double_table.sql4
-rw-r--r--db-4.8.30/test/scr038/data/float_table.sql4
-rw-r--r--db-4.8.30/test/scr038/data/hash_table.sql7
-rw-r--r--db-4.8.30/test/scr038/data/index_name_conflict_with_column.sql18
-rw-r--r--db-4.8.30/test/scr038/data/index_without_name.sql8
-rw-r--r--db-4.8.30/test/scr038/data/int_table.sql4
-rw-r--r--db-4.8.30/test/scr038/data/integer_table.sql4
-rw-r--r--db-4.8.30/test/scr038/data/many_tables.sql93
-rw-r--r--db-4.8.30/test/scr038/data/no_primary_key_constraint.sql4
-rw-r--r--db-4.8.30/test/scr038/data/nonexistent_column_constraints.sql13
-rw-r--r--db-4.8.30/test/scr038/data/numeric_table.sql4
-rw-r--r--db-4.8.30/test/scr038/data/only_database.sql1
-rw-r--r--db-4.8.30/test/scr038/data/only_index.sql3
-rw-r--r--db-4.8.30/test/scr038/data/only_table.sql6
-rw-r--r--db-4.8.30/test/scr038/data/real_table.sql4
-rw-r--r--db-4.8.30/test/scr038/data/sample.sql20
-rw-r--r--db-4.8.30/test/scr038/data/smallint_table.sql4
-rw-r--r--db-4.8.30/test/scr038/data/table_name_conflict_with_database.sql3
-rw-r--r--db-4.8.30/test/scr038/data/table_without_name.sql9
-rw-r--r--db-4.8.30/test/scr038/data/tiny_table.sql4
-rw-r--r--db-4.8.30/test/scr038/data/two_different_databases.sql10
-rw-r--r--db-4.8.30/test/scr038/data/two_different_indexes.sql26
-rw-r--r--db-4.8.30/test/scr038/data/two_different_tables.sql8
-rw-r--r--db-4.8.30/test/scr038/data/two_indexes_on_same_column.sql10
-rw-r--r--db-4.8.30/test/scr038/data/two_same_columns.sql12
-rw-r--r--db-4.8.30/test/scr038/data/two_same_databases.sql11
-rw-r--r--db-4.8.30/test/scr038/data/two_same_indexes.sql11
-rw-r--r--db-4.8.30/test/scr038/data/two_same_name_columns.sql9
-rw-r--r--db-4.8.30/test/scr038/data/two_same_name_columns_in_different_tables.sql6
-rw-r--r--db-4.8.30/test/scr038/data/two_same_name_indexes.sql11
-rw-r--r--db-4.8.30/test/scr038/data/two_same_name_tables.sql8
-rw-r--r--db-4.8.30/test/scr038/data/two_same_tables.sql7
-rw-r--r--db-4.8.30/test/scr038/data/unsupported_access_method.sql8
-rw-r--r--db-4.8.30/test/scr038/data/unsupported_data_type.sql4
-rw-r--r--db-4.8.30/test/scr038/data/varchar_length.sql4
-rw-r--r--db-4.8.30/test/scr038/data/varchar_table.sql4
-rw-r--r--db-4.8.30/test/scr038/data/wrong_create_sequence.sql10
-rw-r--r--db-4.8.30/test/scr038/nMakefile14
-rw-r--r--db-4.8.30/test/sdb001.tcl146
-rw-r--r--db-4.8.30/test/sdb002.tcl227
-rw-r--r--db-4.8.30/test/sdb003.tcl186
-rw-r--r--db-4.8.30/test/sdb004.tcl240
-rw-r--r--db-4.8.30/test/sdb005.tcl161
-rw-r--r--db-4.8.30/test/sdb006.tcl168
-rw-r--r--db-4.8.30/test/sdb007.tcl108
-rw-r--r--db-4.8.30/test/sdb008.tcl93
-rw-r--r--db-4.8.30/test/sdb009.tcl107
-rw-r--r--db-4.8.30/test/sdb010.tcl169
-rw-r--r--db-4.8.30/test/sdb011.tcl140
-rw-r--r--db-4.8.30/test/sdb012.tcl434
-rw-r--r--db-4.8.30/test/sdb013.tcl179
-rw-r--r--db-4.8.30/test/sdb014.tcl112
-rw-r--r--db-4.8.30/test/sdb015.tcl117
-rw-r--r--db-4.8.30/test/sdb016.tcl98
-rw-r--r--db-4.8.30/test/sdb017.tcl99
-rw-r--r--db-4.8.30/test/sdb018.tcl156
-rw-r--r--db-4.8.30/test/sdb019.tcl139
-rw-r--r--db-4.8.30/test/sdb020.tcl124
-rw-r--r--db-4.8.30/test/sdbscript.tcl46
-rw-r--r--db-4.8.30/test/sdbtest001.tcl149
-rw-r--r--db-4.8.30/test/sdbtest002.tcl167
-rw-r--r--db-4.8.30/test/sdbutils.tcl196
-rw-r--r--db-4.8.30/test/sec001.tcl222
-rw-r--r--db-4.8.30/test/sec002.tcl180
-rw-r--r--db-4.8.30/test/shelltest.tcl105
-rw-r--r--db-4.8.30/test/si001.tcl282
-rw-r--r--db-4.8.30/test/si002.tcl235
-rw-r--r--db-4.8.30/test/si003.tcl179
-rw-r--r--db-4.8.30/test/si004.tcl233
-rw-r--r--db-4.8.30/test/si005.tcl170
-rw-r--r--db-4.8.30/test/si006.tcl186
-rw-r--r--db-4.8.30/test/si007.tcl188
-rw-r--r--db-4.8.30/test/si008.tcl273
-rw-r--r--db-4.8.30/test/sijointest.tcl179
-rw-r--r--db-4.8.30/test/siutils.tcl292
-rw-r--r--db-4.8.30/test/sysscript.tcl282
-rw-r--r--db-4.8.30/test/t106script.tcl331
-rw-r--r--db-4.8.30/test/test.tcl2633
-rw-r--r--db-4.8.30/test/test001.tcl221
-rw-r--r--db-4.8.30/test/test002.tcl160
-rw-r--r--db-4.8.30/test/test003.tcl204
-rw-r--r--db-4.8.30/test/test004.tcl168
-rw-r--r--db-4.8.30/test/test005.tcl18
-rw-r--r--db-4.8.30/test/test006.tcl199
-rw-r--r--db-4.8.30/test/test007.tcl18
-rw-r--r--db-4.8.30/test/test008.tcl199
-rw-r--r--db-4.8.30/test/test009.tcl17
-rw-r--r--db-4.8.30/test/test010.tcl181
-rw-r--r--db-4.8.30/test/test011.tcl475
-rw-r--r--db-4.8.30/test/test012.tcl138
-rw-r--r--db-4.8.30/test/test013.tcl239
-rw-r--r--db-4.8.30/test/test014.tcl252
-rw-r--r--db-4.8.30/test/test015.tcl284
-rw-r--r--db-4.8.30/test/test016.tcl206
-rw-r--r--db-4.8.30/test/test017.tcl321
-rw-r--r--db-4.8.30/test/test018.tcl20
-rw-r--r--db-4.8.30/test/test019.tcl135
-rw-r--r--db-4.8.30/test/test020.tcl141
-rw-r--r--db-4.8.30/test/test021.tcl161
-rw-r--r--db-4.8.30/test/test022.tcl61
-rw-r--r--db-4.8.30/test/test023.tcl225
-rw-r--r--db-4.8.30/test/test024.tcl276
-rw-r--r--db-4.8.30/test/test025.tcl145
-rw-r--r--db-4.8.30/test/test026.tcl159
-rw-r--r--db-4.8.30/test/test027.tcl16
-rw-r--r--db-4.8.30/test/test028.tcl224
-rw-r--r--db-4.8.30/test/test029.tcl255
-rw-r--r--db-4.8.30/test/test030.tcl259
-rw-r--r--db-4.8.30/test/test031.tcl234
-rw-r--r--db-4.8.30/test/test032.tcl266
-rw-r--r--db-4.8.30/test/test033.tcl181
-rw-r--r--db-4.8.30/test/test034.tcl35
-rw-r--r--db-4.8.30/test/test035.tcl21
-rw-r--r--db-4.8.30/test/test036.tcl172
-rw-r--r--db-4.8.30/test/test037.tcl199
-rw-r--r--db-4.8.30/test/test038.tcl232
-rw-r--r--db-4.8.30/test/test039.tcl217
-rw-r--r--db-4.8.30/test/test040.tcl22
-rw-r--r--db-4.8.30/test/test041.tcl17
-rw-r--r--db-4.8.30/test/test042.tcl186
-rw-r--r--db-4.8.30/test/test043.tcl191
-rw-r--r--db-4.8.30/test/test044.tcl262
-rw-r--r--db-4.8.30/test/test045.tcl125
-rw-r--r--db-4.8.30/test/test046.tcl820
-rw-r--r--db-4.8.30/test/test047.tcl261
-rw-r--r--db-4.8.30/test/test048.tcl178
-rw-r--r--db-4.8.30/test/test049.tcl186
-rw-r--r--db-4.8.30/test/test050.tcl220
-rw-r--r--db-4.8.30/test/test051.tcl225
-rw-r--r--db-4.8.30/test/test052.tcl268
-rw-r--r--db-4.8.30/test/test053.tcl241
-rw-r--r--db-4.8.30/test/test054.tcl459
-rw-r--r--db-4.8.30/test/test055.tcl140
-rw-r--r--db-4.8.30/test/test056.tcl174
-rw-r--r--db-4.8.30/test/test057.tcl207
-rw-r--r--db-4.8.30/test/test058.tcl110
-rw-r--r--db-4.8.30/test/test059.tcl149
-rw-r--r--db-4.8.30/test/test060.tcl59
-rw-r--r--db-4.8.30/test/test061.tcl231
-rw-r--r--db-4.8.30/test/test062.tcl159
-rw-r--r--db-4.8.30/test/test063.tcl173
-rw-r--r--db-4.8.30/test/test064.tcl68
-rw-r--r--db-4.8.30/test/test065.tcl207
-rw-r--r--db-4.8.30/test/test066.tcl103
-rw-r--r--db-4.8.30/test/test067.tcl163
-rw-r--r--db-4.8.30/test/test068.tcl233
-rw-r--r--db-4.8.30/test/test069.tcl13
-rw-r--r--db-4.8.30/test/test070.tcl137
-rw-r--r--db-4.8.30/test/test071.tcl15
-rw-r--r--db-4.8.30/test/test072.tcl258
-rw-r--r--db-4.8.30/test/test073.tcl296
-rw-r--r--db-4.8.30/test/test074.tcl276
-rw-r--r--db-4.8.30/test/test076.tcl90
-rw-r--r--db-4.8.30/test/test077.tcl92
-rw-r--r--db-4.8.30/test/test078.tcl252
-rw-r--r--db-4.8.30/test/test079.tcl28
-rw-r--r--db-4.8.30/test/test081.tcl14
-rw-r--r--db-4.8.30/test/test082.tcl13
-rw-r--r--db-4.8.30/test/test083.tcl174
-rw-r--r--db-4.8.30/test/test084.tcl52
-rw-r--r--db-4.8.30/test/test085.tcl340
-rw-r--r--db-4.8.30/test/test086.tcl168
-rw-r--r--db-4.8.30/test/test087.tcl293
-rw-r--r--db-4.8.30/test/test088.tcl176
-rw-r--r--db-4.8.30/test/test089.tcl275
-rw-r--r--db-4.8.30/test/test090.tcl15
-rw-r--r--db-4.8.30/test/test091.tcl19
-rw-r--r--db-4.8.30/test/test092.tcl252
-rw-r--r--db-4.8.30/test/test093.tcl434
-rw-r--r--db-4.8.30/test/test094.tcl206
-rw-r--r--db-4.8.30/test/test095.tcl369
-rw-r--r--db-4.8.30/test/test096.tcl393
-rw-r--r--db-4.8.30/test/test097.tcl192
-rw-r--r--db-4.8.30/test/test098.tcl90
-rw-r--r--db-4.8.30/test/test099.tcl275
-rw-r--r--db-4.8.30/test/test100.tcl16
-rw-r--r--db-4.8.30/test/test101.tcl16
-rw-r--r--db-4.8.30/test/test102.tcl234
-rw-r--r--db-4.8.30/test/test103.tcl222
-rw-r--r--db-4.8.30/test/test106.tcl113
-rw-r--r--db-4.8.30/test/test107.tcl168
-rw-r--r--db-4.8.30/test/test109.tcl322
-rw-r--r--db-4.8.30/test/test110.tcl168
-rw-r--r--db-4.8.30/test/test111.tcl370
-rw-r--r--db-4.8.30/test/test112.tcl285
-rw-r--r--db-4.8.30/test/test113.tcl267
-rw-r--r--db-4.8.30/test/test114.tcl339
-rw-r--r--db-4.8.30/test/test115.tcl362
-rw-r--r--db-4.8.30/test/test116.tcl303
-rw-r--r--db-4.8.30/test/test117.tcl205
-rw-r--r--db-4.8.30/test/test119.tcl258
-rw-r--r--db-4.8.30/test/test120.tcl98
-rw-r--r--db-4.8.30/test/test121.tcl125
-rw-r--r--db-4.8.30/test/test122.tcl103
-rw-r--r--db-4.8.30/test/test123.tcl80
-rw-r--r--db-4.8.30/test/test125.tcl205
-rw-r--r--db-4.8.30/test/testparams.tcl511
-rw-r--r--db-4.8.30/test/testutils.tcl3908
-rw-r--r--db-4.8.30/test/txn001.tcl114
-rw-r--r--db-4.8.30/test/txn002.tcl89
-rw-r--r--db-4.8.30/test/txn003.tcl230
-rw-r--r--db-4.8.30/test/txn004.tcl60
-rw-r--r--db-4.8.30/test/txn005.tcl73
-rw-r--r--db-4.8.30/test/txn006.tcl45
-rw-r--r--db-4.8.30/test/txn007.tcl56
-rw-r--r--db-4.8.30/test/txn008.tcl30
-rw-r--r--db-4.8.30/test/txn009.tcl30
-rw-r--r--db-4.8.30/test/txn010.tcl143
-rw-r--r--db-4.8.30/test/txn011.tcl224
-rw-r--r--db-4.8.30/test/txn012.tcl61
-rw-r--r--db-4.8.30/test/txn012script.tcl33
-rw-r--r--db-4.8.30/test/txn013.tcl76
-rw-r--r--db-4.8.30/test/txn014.tcl158
-rw-r--r--db-4.8.30/test/txnscript.tcl66
-rw-r--r--db-4.8.30/test/update.tcl92
-rw-r--r--db-4.8.30/test/upgrade.tcl855
-rw-r--r--db-4.8.30/test/wordlist10001
-rw-r--r--db-4.8.30/test/wrap.tcl99
-rw-r--r--db-4.8.30/test/wrap_reptest.tcl59
679 files changed, 165270 insertions, 0 deletions
diff --git a/db-4.8.30/test/README b/db-4.8.30/test/README
new file mode 100644
index 0000000..8f0dd13
--- /dev/null
+++ b/db-4.8.30/test/README
@@ -0,0 +1,147 @@
+Rules for the Berkeley DB and Berkeley DB-XML test suites
+
+1. Test Naming
+
+The primary script for running the Berkeley DB test suite is named
+'test.tcl'. The primary script for running the DB-XML tests is named
+'xmltest.tcl'.
+
+Tests are named with a (prefix, test number) combination. The
+prefix indicates the type of test (lock, log, xml, etc.). The
+prefix 'test' is used for plain vanilla DB testing. Test numbers
+are 3 digits long, starting with 001.
+
+Procedures common to a group of tests, or to all tests, are placed
+in files named 'xxxutils.tcl'. At the moment, we have the following
+utilities files:
+
+testutils.tcl Utilities common to all DB tests
+reputils.tcl Utilities for replication testing.
+siutils.tcl Utilities for secondary index testing.
+xmlutils.tcl Utilities for XML testing.
+
+2. Internal test structure
+
+Each line within a test should be no more than 80 characters long.
+
+Each test starts with a section like the following:
+
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test001
+# TEST Small keys/data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+
+First we refer to the license and assert copyright, then comes the CVS
+header string. The section of lines beginning # TEST is used to
+automatically maintain the TESTS file, a listing of all tests and
+what they do. Use this section to briefly describe the test's purpose
+and structure.
+
+Next comes the main procedure of the test, which has the same name
+as the tcl file. The test should be liberally commented, and also
+should use 'puts' to send messages to the output file.
+
+Sections of a test are identified with letters: test001.a, test001.b,
+test001.c.
+
+Here's some typical output:
+
+ puts "Test$tnum: $method ($args) $nentries equal key/data pairs"
+ puts "\tTest$tnum.a: put/get loop"
+ puts "\tTest$tnum.b: dump file"
+ puts "\tTest$tnum.c: close, open, and dump file"
+ puts "\tTest$tnum.d: close, open, and dump file in reverse direction"
+
+The reporting of the current value of the args is particularly
+useful, allowing us to say at a glance that "testxxx is failing in
+btree" or whatever. Each line of output must begin with the test name.
+We use this to separate expected informational output from errors.
+
+Ancillary procedures follow the main procedure. Procedures used
+by more than one test should go into the appropriate XXXutils.tcl
+file.
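+
+As an illustration only (not an actual test from the suite; the name
+test901 is made up), a minimal 'testxxx'-style procedure following
+the conventions above might look like this sketch -- the proc is named
+after its file, every output line begins with the test name, and the
+sections are labeled with letters:
+
+	proc test901 { {nentries 10} } {
+		source ./include.tcl
+
+		puts "Test901: $nentries key/data pairs"
+		set db [berkdb_open -create -btree -mode 0644 \
+		    $testdir/test901.db]
+		error_check_good db_open [is_valid_db $db] TRUE
+
+		puts "\tTest901.a: put/get loop"
+		for { set i 1 } { $i <= $nentries } { incr i } {
+			error_check_good put:$i [$db put key$i data$i] 0
+			set ret [lindex [lindex [$db get key$i] 0] 1]
+			error_check_good get:$i $ret data$i
+		}
+
+		puts "\tTest901.b: close database"
+		error_check_good db_close [$db close] 0
+	}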
+
+3. Reporting failures
+
+Failures in tests are reported with a message starting with the
+prefix "FAIL:". Failures in tests are usually caught with the
+error_check_good and error_check_bad routines to compare an
+actual return value to an expected return value. These routines
+take care of putting the "FAIL:" prefix on the message.
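+
+For example (an illustrative fragment, not quoted from any particular
+test), a comparison of an actual return value against an expected one
+looks like:
+
+	set ret [$db put $key $data]
+	error_check_good db_put $ret 0
+	error_check_bad db_handle $db NULL
+
+If the comparison fails, these routines report a message beginning
+with "FAIL:".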
+
+4. Running tests
+
+Any single test can be run from the tclsh prompt by typing the
+name of the test. If it's a test from the 'testxxx' group, you
+should also specify the method you'd like to test:
+
+ log001
+ test001 btree
+
+To run one of the 'testxxx' tests for all methods, use the
+run_test procedure:
+
+ run_test test001
+
+Any group of tests (the subsystems lock, log, test, etc.) can be
+run by typing
+
+ r $sub
+
+where sub is the name of the subsystem.
+
+For any of the following methods
+
+run_method
+run_secmethod
+run_secenv
+run_reptest
+run_repmethod
+run_envmethod
+run_recd
+
+you can type
+
+	run <suffix> <method> [<start> <stop>]
+
+For example, to run test010 through test020 in btree using
+run_method:
+
+ run method btree 10 20
+
+Or the same tests in repmethod:
+
+ run repmethod btree 10 20
+
+Notice the missing underbar.
+
+If you omit the start and stop numbers, you'll get all the tests:
+
+ run method btree
+
+run_recd is a special case, in that it runs the recdxxx tests;
+all the others run the testxxx tests.
+
+To run the standard test suite, type run_std at the tclsh prompt.
+To run all the tests, type run_all.
+
+If you are running run_std or run_all, you may use the run_parallel
+interface to speed things up or to test under conditions of high
+system load. Run_parallel creates a list of all tests in the run,
+reorders the tests randomly, then runs the tests in a number of
+parallel processes. To run run_std in five processes type
+
+ run_parallel 5 run_std
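+
+A typical interactive session, then, looks like the following sketch,
+where "%" is the tclsh prompt and the relative path to test.tcl
+depends on where your Tcl-enabled build directory is:
+
+	% source ../test/test.tcl
+	% run_std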
diff --git a/db-4.8.30/test/TESTS b/db-4.8.30/test/TESTS
new file mode 100644
index 0000000..1a7efa2
--- /dev/null
+++ b/db-4.8.30/test/TESTS
@@ -0,0 +1,3199 @@
+# Automatically built by dist/s_test; may require local editing.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Allocate and lock a self-blocking mutex. Start another process.
+ Try to lock the mutex again -- it will block.
+ Unlock the mutex from the other process, and the blocked
+ lock should be obtained. Clean up.
+ Do another test with a "-process-only" mutex. The second
+ process should not be able to unlock the mutex.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Allocate, lock, unlock, and free a bunch of mutexes.
+ Set basic configuration options and check mutex_stat and
+ the mutex getters for the correct values.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+backup
+ Test of hotbackup functionality.
+
+ Do all of the following tests with and without
+ the -c (checkpoint) option. Make sure that -c and
+ -d (data_dir) are not allowed together.
+
+ (1) Test that plain and simple hotbackup works.
+ (2) Test with -data_dir (-d).
+ (3) Test updating an existing hot backup (-u).
+ (4) Test with absolute path.
+ (5) Test with DB_CONFIG (-D), setting log_dir (-l)
+ and data_dir (-d).
+ (6) DB_CONFIG and update.
+ (7) Repeat hot backup (non-update) with DB_CONFIG and
+ existing directories.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+bigfile001
+ Create a database greater than 4 GB in size. Close, verify.
+ Grow the database somewhat. Close, reverify. Lather, rinse,
+ repeat. Since it will not work on all systems, this test is
+ not run by default.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+bigfile002
+ This one should be faster and not require so much disk space,
+ although it doesn't test as extensively. Create an mpool file
+ with 1K pages. Dirty page 6000000. Sync.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dbm
+ Historic DBM interface test. Use the first 1000 entries from the
+ dictionary. Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Then reopen the file, re-retrieve everything. Finally, delete
+ everything.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+db_reptest
+ Wrapper to configure and run the db_reptest program.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead001
+ Use two different configurations to test deadlock detection among a
+ variable number of processes. One configuration has the processes
+ deadlocked in a ring. The other has the processes all deadlocked on
+ a single resource.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead002
+ Same test as dead001, but use "detect on every collision" instead
+ of separate deadlock detector.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead003
+
+ Same test as dead002, but explicitly specify DB_LOCK_OLDEST and
+ DB_LOCK_YOUNGEST. Verify the correct lock was aborted/granted.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead006
+ Use timeouts rather than the normal deadlock detection (dd) algorithm.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead007
+ Tests for locker and txn id wraparound.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env001
+ Test of env remove interface (formerly env_remove).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env002
+ Test of DB_LOG_DIR and env name resolution.
+ With an environment path specified using -home, and then again
+ with it specified by the environment variable DB_HOME:
+ 1) Make sure that the set_lg_dir option is respected
+ a) as a relative pathname.
+ b) as an absolute pathname.
+ 2) Make sure that the DB_LOG_DIR db_config argument is respected,
+ again as relative and absolute pathnames.
+ 3) Make sure that if -both- db_config and a file are present,
+ only the file is respected (see doc/env/naming.html).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env003
+ Test DB_TMP_DIR and env name resolution
+ With an environment path specified using -home, and then again
+ with it specified by the environment variable DB_HOME:
+ 1) Make sure that the DB_TMP_DIR config file option is respected
+ a) as a relative pathname.
+ b) as an absolute pathname.
+ 2) Make sure that the -tmp_dir config option is respected,
+ again as relative and absolute pathnames.
+ 3) Make sure that if -both- -tmp_dir and a file are present,
+ only the file is respected (see doc/env/naming.html).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env004
+ Test multiple data directories. Do a bunch of different opens
+ to make sure that the files are detected in different directories.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env005
+ Test that using subsystems without initializing them correctly
+ returns an error. Cannot test mpool, because it is assumed in
+ the Tcl code.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env006
+ Make sure that all the utilities exist and run.
+ Test that db_load -r options don't blow up.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env007
+ Test DB_CONFIG config file options for berkdb env.
+ 1) Make sure command line option is respected
+ 2) Make sure that config file option is respected
+ 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
+ method is used, only the file is respected.
+ Then test all known config options.
+ Also test config options on berkdb open. This isn't
+ really env testing, but there's no better place to put it.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env008
+ Test environments and subdirectories.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env009
+ Test calls to all the various stat functions. We have several
+ sprinkled throughout the test suite, but this will ensure that
+ we run all of them at least once.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env010
+ Run recovery in an empty directory, and then make sure we can still
+ create a database in that directory.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env011
+ Run with region overwrite flag.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env012
+ Test DB_REGISTER.
+
+ DB_REGISTER will fail on systems without fcntl. If it
+ fails, make sure we got the expected DB_OPNOTSUP return.
+
+ Then, the real tests:
+ For each test, we start a process that opens an env with -register.
+
+ 1. Verify that a 2nd process can enter the existing env with -register.
+
+ 2. Kill the 1st process, and verify that the 2nd process can enter
+ with "-register -recover".
+
+ 3. Kill the 1st process, and verify that the 2nd process cannot
+ enter with just "-register".
+
+ 4. While the 1st process is still running, a 2nd process enters
+ with "-register". Kill the 1st process. Verify that a 3rd process
+ can enter with "-register -recover". Verify that the 3rd process,
+ entering, causes process 2 to fail with the message DB_RUNRECOVERY.
+
+ 5. We had a bug where recovery was always run with -register
+ if there were empty slots in the process registry file. Verify
+ that recovery doesn't automatically run if there is an empty slot.
+
+ 6. Verify process cannot connect when specifying -failchk and an
+ isalive function has not been declared.
+
+ 7. Verify that a 2nd process can enter the existing env with -register
+ and -failchk when an isalive function has been specified.
+
+ 8. Kill the 1st process, and verify that the 2nd process can enter
+ with "-register -failchk -recover"
+
+ 9. 2nd process enters with "-register -failchk". Kill the 1st process.
+ 2nd process may get blocked on a mutex held by process one. Verify
+ 3rd process can enter with "-register -recover -failchk". 3rd process
+ should run failchk, clear out open txn/log from process 1. It will
+ enter env without need for any additional recovery. We look for
+ "Freeing log information .." sentence in the log for 3rd process as
+ an indication that failchk ran. If DB_RUNRECOVERY were returned
+ instead it would mean failchk could not recover.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env013
+ Test of basic functionality of fileid_reset.
+
+ Create a database in an env. Copy it to a new file within
+ the same env. Reset the file id and make sure it has changed.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env014
+
+ Make sure that attempts to open an environment with
+ incompatible flags (e.g. replication without transactions)
+ fail with the appropriate messages.
+
+ A new thread of control joining an env automatically
+ initializes the same subsystems as the original env.
+ Make sure that the attempt to change subsystems when
+ joining an env fails with the appropriate messages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env015
+ Rename the underlying directory of an env, make sure everything
+ still works. Test runs with regular named databases and with
+ in-memory named databases.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env016
+ Replication settings and DB_CONFIG
+
+ Create a DB_CONFIG for various replication settings. Use
+ rep_stat or getter functions to verify they're set correctly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env017
+ Check documented "stat" fields against the fields
+ returned by the "stat" functions. Make sure they
+ match, and that none are missing.
+ These are the stat functions we test:
+ env log_stat
+ env lock_stat
+ env txn_stat
+ env mutex_stat
+ env rep_stat
+ env repmgr_stat
+ env mpool_stat
+ db_stat
+ seq_stat
+
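+ Each of these Tcl stat calls returns a list of name/value pairs, so
+ the field names can be collected and compared against the documented
+ list. A minimal sketch of collecting the names:
+
+	set fields {}
+	foreach pair [$env lock_stat] {
+		lappend fields [lindex $pair 0]
+	}
+	# At the very least, some fields must have been returned.
+	error_check_bad lock_stat_fields [llength $fields] 0
+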
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env018
+ Test getters when joining an env. When a second handle is
+ opened on an existing env, get_open_flags needs to return
+ the correct flags to the second handle so it knows what sort
+ of environment it's just joined.
+
+ For several different flags to env_open, open an env. Open
+ a second handle on the same env, get_open_flags and verify
+ the flag is returned.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+fop001.tcl
+ Test file system operations, combined in a transaction. [#7363]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+fop002.tcl
+ Test file system operations in the presence of bad permissions.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+fop003
+
+ Test behavior of create and truncate for compatibility
+ with sendmail.
+ 1. DB_TRUNCATE is not allowed with locking or transactions.
+ 2. Can -create into zero-length existing file.
+ 3. Can -create into non-zero-length existing file if and
+ only if DB_TRUNCATE is specified.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+fop004
+ Test of DB->rename(). (formerly test075)
+ Test that files can be renamed from one directory to another.
+ Test that files can be renamed using absolute or relative
+ pathnames.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+fop005
+ Test of DB->remove()
+ Formerly test080.
+ Test use of dbremove with and without envs, with absolute
+ and relative paths, and with subdirectories.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+fop006
+ Test file system operations in multiple simultaneous
+ transactions. Start one transaction, do a file operation.
+ Start a second transaction, do a file operation. Abort
+ or commit txn1, then abort or commit txn2, and check for
+ appropriate outcome.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+fop007
+ Test file system operations on named in-memory databases.
+ Combine two ops in one transaction.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+fop008
+ Test file system operations on named in-memory databases.
+ Combine two ops in one transaction.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+jointest
+ Test duplicate assisted joins. Executes 1, 2, 3 and 4-way joins
+ with differing index orders and selectivity.
+
+ We'll test 2-way, 3-way, and 4-way joins and figure that if those
+ work, everything else does as well. We'll create test databases
+ called join1.db, join2.db, join3.db, and join4.db. The number on
+ the database describes the duplication -- duplicates are of the
+ form 0, N, 2N, 3N, ... where N is the number of the database.
+ Primary.db is the primary database, and null.db is the database
+ that has no matching duplicates.
+
+ We should test this on all btrees, all hash, and a combination thereof.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock001
+ Make sure that the basic lock tests work. Do some simple gets
+ and puts for a single locker.
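+
+ The single-locker pattern looks roughly like this (a sketch; the
+ object name is made up):
+
+	set env [berkdb_env -create -home $testdir -lock]
+	set locker [$env lock_id]
+	set lock [$env lock_get read $locker myobject]
+	error_check_good lock_put [$lock put] 0
+	error_check_good lock_id_free [$env lock_id_free $locker] 0
+	error_check_good env_close [$env close] 0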
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock002
+ Exercise basic multi-process aspects of lock.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock003
+ Exercise multi-process aspects of lock. Generate a bunch of parallel
+ testers that try to randomly obtain locks; make sure that the locks
+ correctly protect corresponding objects.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock004
+ Test locker ids wrapping around.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock005
+ Check that page locks are being released properly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock006
+ Test lock_vec interface. We do all the same things that
+ lock001 does, using lock_vec instead of lock_get and lock_put,
+ plus a few more things like lock-coupling.
+ 1. Get and release one at a time.
+ 2. Release with put_obj (all locks for a given locker/obj).
+ 3. Release with put_all (all locks for a given locker).
+ Regularly check lock_stat to verify all locks have been
+ released.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log001
+ Read/write log records.
+ Test with and without fixed-length, in-memory logging,
+ and encryption.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log002
+ Tests multiple logs
+ Log truncation
+ LSN comparison and file functionality.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log003
+ Verify that log_flush is flushing records correctly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log004
+ Make sure that if we do PREVs on a log, but the beginning of the
+ log has been truncated, we do the right thing.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log005
+ Check that log file sizes can change on the fly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log006
+ Test log file auto-remove.
+ Test normal operation.
+ Test a long-lived txn.
+ Test log_archive flags.
+ Test db_archive flags.
+ Test turning on later.
+ Test setting via DB_CONFIG.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log007
+ Test of in-memory logging bugs. [#11505]
+
+ Test db_printlog with in-memory logs.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log008
+ Test what happens if a txn_ckp record falls into a
+ different log file than the DBREG_CKP records generated
+ by the same checkpoint.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log009
+ Test of logging and getting log file version information.
+ Each time we cross a log file boundary verify we can
+ get the version via the log cursor.
+ Do this both forward and backward.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp001
+ Randomly updates pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp002
+ Tests multiple processes accessing and modifying the same files.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp003
+ Test reader-only/writer process combinations; we use the access methods
+ for testing.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp004
+ Test that small read-only databases are mapped into memory.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+mut001
+ Exercise the mutex API.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+mut002
+ Two-process mutex test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+mut003
+ Try doing mutex operations out of order. Make sure
+ we get appropriate errors.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+plat001
+
+ Test of portability of sequences.
+
+ Create and dump a database containing sequences. Save the dump.
+ This test is used in conjunction with the upgrade tests, which
+ will compare the saved dump to a locally created dump.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd001
+ Per-operation recovery tests for non-duplicate, non-split
+ messages. Makes sure that we exercise redo, undo, and do-nothing
+ condition. Any test that appears with the message (change state)
+ indicates that we've already run the particular test, but we are
+ running it again so that we can change the state of the database
+ to prepare for the next test (this applies to all other recovery
+ tests as well).
+
+ These are the most basic recovery tests. We do individual recovery
+ tests for each operation in the access method interface. First we
+ create a file and capture the state of the database (i.e., we copy
+ it). Then we run a transaction containing a single operation. In
+ one test, we abort the transaction and compare the outcome to the
+ original copy of the file. In the second test, we restore the
+ original copy of the database and then run recovery and compare
+ this against the actual database.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd002
+ Split recovery tests. For every known split log message, makes sure
+ that we exercise redo, undo, and do-nothing condition.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd003
+ Duplicate recovery tests. For every known duplicate log message,
+ makes sure that we exercise redo, undo, and do-nothing condition.
+
+ Test all the duplicate log messages and recovery operations. We make
+ sure that we exercise all possible recovery actions: redo, undo, undo
+ but no fix necessary and redo but no fix necessary.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd004
+ Big key test where big key gets elevated to internal page.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd005
+ Verify reuse of file ids works on catastrophic recovery.
+
+ Make sure that we can do catastrophic recovery even if we open
+ files using the same log file id.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd006
+ Nested transactions.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd007
+ File create/delete tests.
+
+ This is a recovery test for create/delete of databases. We have
+ hooks in the database so that we can abort the process at various
+ points and make sure that the transaction doesn't commit. We
+ then need to recover and make sure the file is correctly existing
+ or not, as the case may be.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd008
+ Test deeply nested transactions and many-child transactions.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd009
+ Verify record numbering across split/reverse splits and recovery.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd010
+ Test stability of btree duplicates across btree off-page dup splits
+ and reverse splits and across recovery.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd011
+ Verify that recovery to a specific timestamp works.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd012
+ Test of log file ID management. [#2288]
+ Test recovery handling of file opens and closes.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd013
+ Test of cursor adjustment on child transaction aborts. [#2373]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd014
+ This is a recovery test for create/delete of queue extents. We
+ then run recovery and make sure the file exists or does not
+ exist, as the case may be.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd015
+ This is a recovery test exercising a large number of prepared txns.
+ The test forces txn_recover to be called first with the
+ DB_FIRST flag and then with DB_NEXT.
+
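+ A minimal sketch of the txn_recover idiom the test forces, in the
+ C API; it assumes an environment just opened with DB_RECOVER that
+ has outstanding prepared transactions, and aborting each returned
+ transaction is only one possible resolution.
+
+     #include <db.h>
+
+     /* Resolve prepared-but-unresolved transactions after recovery. */
+     void
+     resolve_prepared(DB_ENV *env)
+     {
+         DB_PREPLIST prep[32];
+         long count, i;
+
+         /* First call uses DB_FIRST, subsequent calls use DB_NEXT. */
+         (void)env->txn_recover(env, prep, 32, &count, DB_FIRST);
+         while (count > 0) {
+             for (i = 0; i < count; i++)
+                 (void)prep[i].txn->abort(prep[i].txn);
+             (void)env->txn_recover(env, prep, 32, &count, DB_NEXT);
+         }
+     }
+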
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd016
+ Test recovery after checksum error.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd017
+ Test recovery and security. This is basically a watered
+ down version of recd001 just to verify that encrypted environments
+ can be recovered.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd018
+ Test recovery of closely interspersed checkpoints and commits.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd019
+ Test txn id wrap-around and recovery.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd020
+ Test creation of intermediate directories -- an
+ undocumented, UNIX-only feature.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd021
+ Test of failed opens in recovery.
+
+ If a file was deleted through the file system (and not
+ within Berkeley DB), an error message should appear.
+ Test for regular files and subdbs.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd022
+ Test that pages allocated by an aborted subtransaction
+ within an aborted prepared parent transaction are returned
+ to the free list after recovery. This exercises
+ __db_pg_prepare in systems without FTRUNCATE. [#7403]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd023
+ Test recovery of a reverse split.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd024
+ Test recovery of streaming partial insert operations. These are
+ operations that do multiple partial puts that append to an existing
+ data item (as long as the data item is on an overflow page).
+ The interesting cases are:
+ * Simple streaming operations
+ * Operations that cause the overflow item to flow onto another page.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep001
+ Replication rename and forced-upgrade test.
+
+ Run rep_test in a replicated master environment.
+ Verify that the database on the client is correct.
+ Next, remove the database, close the master, upgrade the
+ client, reopen the master, and make sure the new master can
+ correctly run rep_test and propagate it in the other direction.
+
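+ The rep* tests are driven from Tcl, but they rest on the base
+ replication API; the hedged sketch below shows the C calls
+ involved, assuming an environment opened with DB_INIT_REP.
+ SELF_EID and the empty send callback are placeholders; a real
+ callback must deliver the control and rec DBTs to the other
+ sites, which hand them to rep_process_message().
+
+     #include <db.h>
+
+     #define SELF_EID 1    /* placeholder environment ID */
+
+     /* Transport callback: a real one forwards control/rec. */
+     static int
+     send_msg(DB_ENV *env, const DBT *control, const DBT *rec,
+         const DB_LSN *lsnp, int eid, u_int32_t flags)
+     {
+         return (0);
+     }
+
+     void
+     start_site(DB_ENV *env, int master)
+     {
+         (void)env->rep_set_transport(env, SELF_EID, send_msg);
+         (void)env->rep_start(env, NULL,
+             master ? DB_REP_MASTER : DB_REP_CLIENT);
+         /* Incoming messages go to env->rep_process_message(). */
+     }
+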
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep002
+ Basic replication election test.
+
+ Run a modified version of test001 in a replicated master
+ environment; hold an election among a group of clients to
+ make sure they select a proper master from amongst themselves,
+ in various scenarios.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep003
+ Repeated shutdown/restart replication test
+
+ Run a quick put test in a replicated master environment;
+ start up, shut down, and restart client processes, with
+ and without recovery. To ensure that environment state
+ is transient, use DB_PRIVATE.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep005
+ Replication election test with error handling.
+
+ Run rep_test in a replicated master environment;
+ hold an election among a group of clients to make sure they select
+ a proper master from amongst themselves, forcing errors at various
+ locations in the election path.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep006
+ Replication and non-rep env handles.
+
+ Run a modified version of test001 in a replicated master
+ environment; verify that the database on the client is correct.
+ Next, create a non-rep env handle to the master env.
+ Attempt to open the database r/w to force an error.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep007
+ Replication and bad LSNs
+
+ Run rep_test in a replicated master env.
+ Close the client. Make additional changes to master.
+ Close the master. Open the client as the new master.
+ Make several different changes. Open the old master as
+ the client. Verify periodically that contents are correct.
+ This test is not appropriate for named in-memory db testing
+ because the databases are lost when both envs are closed.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep008
+ Replication, backup and synchronizing
+
+ Run a modified version of test001 in a replicated master
+ environment.
+ Close master and client.
+ Copy the master log to the client.
+ Clean the master.
+ Reopen the master and client.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep009
+ Replication and DUPMASTERs
+ Run test001 in a replicated environment.
+
+ Declare one of the clients to also be a master.
+ Close a client, clean it and then declare it a 2nd master.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep010
+ Replication and ISPERM
+
+ With consecutive message processing, make sure every
+ DB_REP_PERMANENT is responded to with an ISPERM when
+ processed. With gaps in the processing, make sure
+ every DB_REP_PERMANENT is responded to with an ISPERM
+ or a NOTPERM. Verify in both cases that the LSN returned
+ with ISPERM is found in the log.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep011
+ Replication: test open handle across an upgrade.
+
+ Open and close test database in master environment.
+ Update the client. Check client, and leave the handle
+ to the client open as we close the masterenv and upgrade
+ the client to master. Reopen the old master as client
+ and catch up. Test that we can still do a put to the
+ handle we created on the master while it was still a
+ client, and then make sure that the change can be
+ propagated back to the new client.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep012
+ Replication and dead DB handles.
+
+ Run a modified version of test001 in a replicated master env.
+ Run in replicated environment with secondary indices too.
+ Make additional changes to master, but not to the client.
+ Downgrade the master and upgrade the client with open db handles.
+ Verify that the rollback on clients gives dead db handles.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep013
+ Replication and swapping master/clients with open dbs.
+
+ Run a modified version of test001 in a replicated master env.
+ Make additional changes to master, but not to the client.
+ Swap master and client.
+ Verify that the rollback on clients gives dead db handles.
+ Rerun the test, turning on client-to-client synchronization.
+ Swap and verify several times.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep014
+ Replication and multiple replication handles.
+ Test multiple client handles, opening and closing to
+ make sure we get the right openfiles.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep015
+ Locking across multiple pages with replication.
+
+ Open master and client with small pagesize and
+ generate more than one page and generate off-page
+ dups on the first page (second key) and last page
+ (next-to-last key).
+ Within a single transaction, for each database, open
+ 2 cursors and delete the first and last entries (this
+ exercises locks on regular pages). Intermittently
+ update client during the process.
+ Within a single transaction, for each database, open
+ 2 cursors. Walk to the off-page dups and delete one
+ from each end (this exercises locks on off-page dups).
+ Intermittently update client.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep016
+ Replication election test with varying required nvotes.
+
+ Run a modified version of test001 in a replicated master environment;
+ hold an election among a group of clients to make sure they select
+ the master with varying required participants.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep017
+ Concurrency with checkpoints.
+
+ Verify that we achieve concurrency in the presence of checkpoints.
+ Here are the checks that we wish to make:
+ While dbenv1 is handling the checkpoint record:
+ Subsequent in-order log records are accepted.
+ Accepted PERM log records get NOTPERM
+ A subsequent checkpoint gets NOTPERM
+ After checkpoint completes, next txn returns PERM
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep018
+ Replication with dbremove.
+
+ Verify that the attempt to remove a database file
+ on the master hangs while another process holds a
+ handle on the client.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep019
+ Replication and multiple clients at same LSN.
+ Have several clients at the same LSN. Run recovery at
+ different times. Declare a client master and after sync-up
+ verify all client logs are identical.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep020
+ Replication elections - test election generation numbers.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep021
+ Replication and multiple environments.
+ Run similar tests in separate environments, making sure
+ that some data overlaps. Then, "move" one client env
+ from one replication group to another and make sure that
+ we do not get divergent logs. We either match the first
+ record and end up with identical logs or we get an error.
+ Verify all client logs are identical if successful.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep022
+ Replication elections - test election generation numbers
+ during simulated network partition.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep023
+ Replication using two master handles.
+
+ Open two handles on one master env. Create two
+ databases, one through each master handle. Process
+ all messages through the first master handle. Make
+ sure changes made through both handles are picked
+ up properly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep024
+ Replication page allocation / verify test
+
+ Start a master (site 1) and a client (site 2). Master
+ closes (simulating a crash). Site 2 becomes the master
+ and site 1 comes back up as a client. Verify database.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep025
+ Test of DB_REP_JOIN_FAILURE.
+
+ One master, one client.
+ Generate several log files.
+ Remove old master log files.
+ Delete client files and restart client.
+ Put one more record to the master. At the next
+ processing of messages, the client should get JOIN_FAILURE.
+ Recover with a hot failover.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep026
+ Replication elections - simulate a crash after sending
+ a vote.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep027
+ Replication and secondary indexes.
+
+ Set up a secondary index on the master and make sure
+ it can be accessed from the client.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep028
+ Replication and non-rep env handles. (Also see rep006.)
+
+ Open second non-rep env on client, and create a db
+ through this handle. Open the db on master and put
+ some data. Check whether the non-rep handle keeps
+ working. Also check if opening the client database
+ in the non-rep env writes log records.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep029
+ Test of internal initialization.
+
+ One master, one client.
+ Generate several log files.
+ Remove old master log files.
+ Delete client files and restart client.
+ Put one more record to the master.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep030
+ Test of internal initialization with multiple files and pagesizes.
+ Hold some databases open on master.
+
+ One master, one client using a data_dir for internal init.
+ Generate several log files.
+ Remove old master log files.
+ Delete client files and restart client.
+ Put one more record to the master.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep031
+ Test of internal initialization and blocked operations.
+
+ One master, one client.
+ Put one more record to the master.
+ Test that internal initialization blocks:
+ log_archive, rename, remove, fileid_reset, lsn_reset.
+ Sleep 30+ seconds.
+ Test that blocked operations are now unblocked.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep032
+ Test of log gap processing.
+
+ One master, one client.
+ Run rep_test.
+ Run rep_test without sending messages to client.
+ Make sure client missing the messages catches up properly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep033
+ Test of internal initialization with rename and remove of dbs.
+
+ One master, one client.
+ Generate several databases. Replicate to client.
+ Do some renames and removes, both before and after
+ closing the client.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep034
+ Test of STARTUPDONE notification.
+
+ STARTUPDONE can now be recognized without the need for new "live" log
+ records from the master (under favorable conditions). The response to
+ the ALL_REQ at the end of synchronization includes an end-of-log marker
+ that now triggers it. However, the message containing that end marker
+ could get lost, so live log records still serve as a back-up mechanism.
+ The end marker may also be set under c2c sync, but only if the serving
+ client has itself achieved STARTUPDONE.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep035
+ Test sync-up recovery in replication.
+
+ We need to fork off 4 child tclsh processes to operate
+ on Site 3's (client always) home directory:
+ Process 1 continually calls lock_detect.
+ Process 2 continually calls txn_checkpoint.
+ Process 3 continually calls memp_trickle.
+ Process 4 continually calls log_archive.
+ Sites 1 and 2 will continually swap being master
+ (forcing site 3 to continually run sync-up recovery)
+ New master performs 1 operation, replicates and downgrades.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep036
+ Multiple master processes writing to the database.
+ One process handles all message processing.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep037
+ Test of internal initialization and page throttling.
+
+ One master, one client, force page throttling.
+ Generate several log files.
+ Remove old master log files.
+ Delete client files and restart client.
+ Put one more record to the master.
+ Verify page throttling occurred.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep038
+ Test of internal initialization and ongoing master updates.
+
+ One master, one client.
+ Generate several log files.
+ Remove old master log files.
+ Delete client files and restart client.
+ Put more records on master while initialization is in progress.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep039
+ Test of interrupted internal initialization. The
+ interruption is due to a changed master, or the client crashing,
+ or both.
+
+ One master, two clients.
+ Generate several log files. Remove old master log files.
+ Restart client, optionally having "cleaned" client env dir. Either
+ way, this has the effect of forcing an internal init.
+ Interrupt the internal init.
+ Vary the number of times we process messages to make sure
+ the interruption occurs at varying stages of the first internal
+ initialization.
+
+ Run for btree and queue only because of the number of permutations.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep040
+ Test of racing rep_start and transactions.
+
+ One master, one client.
+ Have master in the middle of a transaction.
+ Call rep_start to make master a client.
+ Commit the transaction.
+ Call rep_start to make master the master again.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep041
+ Turn replication on and off at run-time.
+
+ Start a master with replication OFF (noop transport function).
+ Run rep_test to advance log files and archive.
+ Start up client; change master to working transport function.
+ Now replication is ON.
+ Do more ops, make sure client is up to date.
+ Close client, turn replication OFF on master, do more ops.
+ Repeat the cycle from the client startup step.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep042
+ Concurrency with updates.
+
+ Verify racing role changes and updates don't result in
+ pages with LSN 0,1. Set up an environment that is master.
+ Spawn child process that does a delete, but using the
+ $env check so that it sleeps in the middle of the call.
+ Master downgrades and then sleeps as a client so that
+ child will run. Verify child does not succeed (should
+ get read-only error) due to role change in the middle of
+ its call.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep043
+
+ Constant writes during upgrade/downgrade.
+
+ Three envs take turns being master. Each env
+ has a child process which does writes all the
+ time. They will succeed when that env is master
+ and fail when it is not.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep044
+
+ Test rollbacks with open file ids.
+
+ We have one master with two handles and one client.
+ Each time through the main loop, we open a db, write
+ to the db, and close the db. Each one of these actions
+ is propagated to the client, or a roll back is forced
+ by swapping masters.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep045
+
+ Replication with versions.
+
+ Mimic an application where a database is set up in the
+ background and then put into a replication group for use.
+ The "version database" identifies the current live
+ version, the database against which queries are made.
+ For example, the version database might say the current
+ version is 3, and queries would then be sent to db.3.
+ Version 4 is prepared for use while version 3 is in use.
+ When version 4 is complete, the version database is updated
+ to point to version 4 so queries can be directed there.
+
+ This test has a master and two clients. One client swaps
+ roles with the master, and the other client runs constantly
+ in another process.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep046
+ Replication and basic bulk transfer.
+ Set bulk transfer replication option.
+ Run long txns on master and then commit. Process on client
+ and verify contents. Run a very long txn so that logging
+ must send the log. Process and verify on client.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep047
+ Replication and log gap bulk transfers.
+ Set bulk transfer replication option.
+ Run test. Start a new client (to test ALL_REQ and bulk).
+ Run small test again. Clear messages for 1 client.
+ Run small test again to test LOG_REQ gap processing and bulk.
+ Process and verify on clients.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep048
+ Replication and log gap bulk transfers.
+ Have two master env handles. Turn bulk on in
+ one (turns it on for both). Turn it off in the other.
+ While toggling, send log records from both handles.
+ Process message and verify master and client match.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep049
+ Replication and delay syncing clients - basic test.
+
+ Open and start up a master and two clients. Turn on delay sync
+ in the delayed client. Change master, add data and process messages.
+ Verify delayed client does not match. Make additional changes and
+ update the delayed client. Verify all match.
+ Add in a fresh delayed client to test delay of ALL_REQ.
+ Process startup messages and verify the fresh client has no database.
+ Sync and verify fresh client matches.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep050
+ Replication and delay syncing clients - change master test.
+
+ Open and start up master and 4 clients. Turn on delay for 3 clients.
+ Switch masters, add data and verify delayed clients are out of date.
+ Make additional changes to master. And change masters again.
+ Sync/update delayed client and verify. The 4th client is a brand
+ new delayed client added in to test the non-verify path.
+
+ Then test two different things:
+ 1. Swap master again while clients are still delayed.
+ 2. Swap master again while sync is proceeding for one client.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep051
+ Test of compaction with replication.
+
+ Run rep_test in a replicated master environment.
+ Delete a large number of entries and compact with -freespace.
+ Propagate the changes to the client and make sure client and
+ master match.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep052
+ Test of replication with NOWAIT.
+
+ One master, one client. After initializing
+ everything normally, close client and let the
+ master get ahead -- far enough that the master
+ no longer has the client's last log file.
+ Reopen the client and turn on NOWAIT.
+ Process a few messages to get the client into
+ recovery mode, and verify that lockout occurs
+ on a txn API call (txn_begin) and an env API call.
+ Process all the messages and verify that lockout
+ is over.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep053
+ Replication and basic client-to-client synchronization.
+
+ Open and start up master and 1 client.
+ Start up a second client later and verify it sync'ed from
+ the original client, not the master.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep054
+ Test of internal initialization where a far-behind
+ client takes over as master.
+
+ One master, two clients.
+ Run rep_test and process.
+ Close client 1.
+ Run rep_test, opening new databases, and processing
+ messages. Archive as we go so that log files get removed.
+ Close master and reopen client 1 as master. Process messages.
+ Verify that new master and client are in sync.
+ Run rep_test again, adding data to one of the new
+ named databases.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep055
+ Test of internal initialization and log archiving.
+
+ One master, one client.
+ Generate several log files.
+ Remove old master log files and generate several more.
+ Get list of archivable files from db_archive and restart client.
+ As client is in the middle of internal init, remove
+ the log files returned earlier by db_archive.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep058
+
+ Replication with early databases
+
+ Mimic an application that creates a database before
+ calling rep_start, thus writing log records on a client
+ before it is a client. Verify we cannot join the replication group.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep060
+ Test of normally running clients and internal initialization.
+ Have a client running normally, but slow/far behind the master.
+ Then the master checkpoints and archives, causing the client
+ to suddenly be thrown into internal init. This test verifies
+ that we clean up the old files/pages in mpool and dbreg.
+ Also test the same thing with the app holding an open dbp.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep061
+ Test of internal initialization with multiple files and
+ pagesizes, with page gaps.
+
+ One master, one client.
+ Generate several log files.
+ Remove old master log files.
+ Delete client files and restart client.
+ Put one more record to the master.
+ Force some page messages to get dropped.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep062
+ Test of internal initialization where client has a different
+ kind of database than the master.
+
+ Create a master of one type, and let the client catch up.
+ Close the client.
+ Remove the database on the master, and create a new
+ database of the same name but a different type.
+ Run the master ahead far enough that internal initialization
+ will be required on the reopen of the client.
+ Reopen the client and verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep063
+ Replication election test with simulated different versions
+ for each site. This tests that old sites with real priority
+ trump ELECTABLE sites with zero priority even with greater LSNs.
+ There is a special case in the code for testing that if the
+ priority is <= 10, we simulate mixed versions for elections.
+
+ Run a rep_test in a replicated master environment and close;
+ hold an election among a group of clients to make sure they select
+ the master with varying LSNs and priorities.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep064
+ Replication rename and forced-upgrade test.
+
+ The test verifies that the client correctly
+ (internally) closes files when upgrading to master.
+ It does this by having the master have a database
+ open, then crashing. The client upgrades to master,
+ and attempts to remove the open database.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep065
+ Tests replication running with different versions.
+ This capability was introduced in 4.5.
+
+ Start a replication group of 1 master and N sites, all
+ running some historical version greater than or equal to 4.4.
+ Take down a client and bring it up again running current.
+ Run some upgrades, make sure everything works.
+
+ Each site runs the tcllib of its own version, but uses
+ the current tcl code (e.g. test.tcl).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep066
+ Replication and dead log handles.
+
+ Run rep_test on master and a client.
+ Simulate client crashes (master continues) until log 2.
+ Open 2nd master env handle and put something in log and flush.
+ Downgrade master, restart client as master.
+ Run rep_test on newmaster until log 2.
+ New master writes log records, newclient processes records
+ and 2nd newclient env handle calls log_flush.
+ New master commits, newclient processes and should succeed.
+ Make sure 2nd handle detects the old log handle and doesn't
+ write to a stale handle (if it does, the processing of the
+ commit will fail).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep067
+ Replication election test with large timeouts.
+
+ Test replication elections among clients with widely varying
+ timeouts. This test is used to simulate a customer that
+ wants to force full participation in an election, but only
+ if all sites are present (i.e. if all sites are restarted
+ together). If any site has already been part of the group,
+ then we want to be able to elect a master based on majority.
+ Using varied timeouts, we can force full participation if
+ all sites are present with "long_timeout" amount of time and
+ then revert to majority.
+
+ A long_timeout would be several minutes whereas a normal
+ short timeout would be a few seconds.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep068
+ Verify replication of dbreg operations does not hang clients.
+ In a simple replication group, create a database with very
+ little data. With DB_TXN_NOSYNC the database can be created
+ at the client even though the log is not flushed. If we crash
+ and restart, the application of the log starts over again, even
+ though the database is still there. The application can open
+ the database before replication tries to re-apply the create.
+ This causes a hang as replication waits to be able to get a
+ handle lock.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep069
+ Test of internal initialization and elections.
+
+ If a client is in a recovery mode of any kind, it
+ participates in elections at priority 0 so it can
+ never be elected master.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep070
+ Test of startup_done condition with idle master.
+
+ Join a client to an existing master, and verify that
+ the client detects startup_done even if the master
+ does not execute any new transactions.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep071
+ Test of multiple simultaneous client env handles and
+ upgrading/downgrading. Tests use of temp db handle
+ internally.
+
+ Open a master and 2 handles to the same client env.
+ Run rep_test.
+ Close master and upgrade client to master using one env handle.
+ Run rep_test again, and then downgrade back to client.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep072
+ Verify that internal init does not leak resources from
+ the locking subsystem.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep073
+
+ Test of allowing clients to create and update their own scratch
+ databases within the environment. Doing so requires the use
+ of the DB_TXN_NOT_DURABLE flag for those databases.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep074
+ Verify replication withstands send errors processing requests.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep075
+ Replication and prepared transactions.
+ Test having outstanding prepared transactions and simulating
+ crashing or upgrading or downgrading sites.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep076
+ Replication elections - what happens if elected client
+ does not become master?
+
+ Set up a master and 3 clients. Take down master, run election.
+ The elected client will ignore the fact that it's been elected,
+ so we still have 2 clients.
+
+ Run another election, a regular election that allows the winner
+ to become master, and make sure it goes okay. We do this both
+ for the client that ignored its election and for the other client.
+
+ This simulates what would happen if, say, we had a temporary
+ network partition and lost the winner.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep077
+
+ Replication, recovery and applying log records immediately.
+ Master and 1 client. Start up both sites.
+ Close client and run rep_test on the master so that the
+ log record is the same LSN the client would be expecting.
+ Reopen client with recovery and verify the client does not
+ try to apply that "expected" record before it synchronizes
+ with the master.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep078
+
+ Replication and basic lease test.
+ Set leases on master and 2 clients.
+ Do a lease operation and process to all clients.
+ Read with lease on master. Do another lease operation
+ and don't process on any client. Try to read with a lease
+ on the master and verify it fails. Process the messages
+ to the clients and retry the read.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep079
+ Replication leases and invalid usage.
+
+ Open a client without leases. Attempt to set leases after rep_start.
+ Attempt to declare as master without election.
+ Run an election with an nsites parameter value.
+ Elect a master with leases. Put some data and send to clients.
+ Cleanly shutdown master env. Restart without
+ recovery and verify leases are expired and refreshed.
+ Add a new client without leases to a group using leases.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep080
+ NOAUTOINIT with empty client logs.
+
+ Verify that a fresh client trying to join the group for
+ the first time observes the setting of DELAY_SYNC and NOAUTOINIT
+ properly.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep081
+ Test of internal initialization and missing database files.
+
+ One master, one client, two databases.
+ Generate several log files.
+ Remove old master log files.
+ Start up client.
+ Remove or replace one master database file while client initialization
+ is in progress, make sure other master database can keep processing.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep082
+ Sending replication requests to correct master site.
+
+ Regression test for a bug [#16592] where a client could send an
+ UPDATE_REQ to another client instead of the master.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep083
+ Replication clients must never send VERIFY_FAIL to a c2c request.
+
+ Regression test for a bug [#16592] where a client could send a
+ VERIFY_FAIL to another client, which is illegal.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep084
+ Abbreviated internal init for named in-memory databases (NIMDBs).
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep085
+ Skipping unnecessary abbreviated internal init.
+
+ Make sure that once we've materialized NIMDBs, we don't bother
+ trying to do it again on subsequent sync without recovery. Make
+ sure we do probe for the need to materialize NIMDBs, but don't do
+ any internal init at all if there are no NIMDBs. Note that in order to
+ do this test we don't even need any NIMDBs.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep086
+ Interrupted abbreviated internal init.
+
+ Make sure we cleanly remove partially loaded named in-memory
+ databases (NIMDBs).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep087
+ Abbreviated internal init with open file handles.
+
+ Client has open handle to an on-disk DB when abbreviated
+ internal init starts. Make sure we lock out access, and make sure
+ it ends up as HANDLE_DEAD. Also, make sure that if there are
+ no NIMDBs, that we *don't* get HANDLE_DEAD.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep088
+ Replication roll-back preserves checkpoint.
+
+ Create a situation where a client has to roll back its
+ log, discarding some existing transactions, in order to sync
+ with a new master.
+
+ 1. When the client still has its entire log file history, all
+ the way back to log file #1, it's OK if the roll-back discards
+ any/all checkpoints.
+ 2. When old log files have been archived, if the roll-back would
+ remove all existing checkpoints it must be forbidden. The log
+ must always have a checkpoint (or all files back through #1).
+ The client must do internal init or return JOIN_FAILURE.
+ 3. (the normal case) Old log files archived, and a checkpoint
+ still exists in the portion of the log which will remain after
+ the roll-back: no internal-init/JOIN_FAILURE necessary.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr001
+ Basic repmgr test.
+
+ Create an appointed master and two clients, process some records and
+ verify resulting databases.
+
+ Run for btree only because access method shouldn't matter.
+
+
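+ A hedged sketch of the repmgr calls a test like this rests on, in
+ the C API; host names, ports and the site count are illustrative,
+ and the environment is assumed to be opened with DB_INIT_REP and
+ DB_THREAD. Error handling is omitted.
+
+     #include <db.h>
+
+     void
+     start_repmgr_master(DB_ENV *env)
+     {
+         /* Local listening address and one known remote site. */
+         (void)env->repmgr_set_local_site(env, "localhost", 5000, 0);
+         (void)env->repmgr_add_remote_site(env,
+             "localhost", 5001, NULL, 0);
+
+         /* Total number of sites in the group (illustrative). */
+         (void)env->rep_set_nsites(env, 3);
+
+         /* Three message threads, appointed master. */
+         (void)env->repmgr_start(env, 3, DB_REP_MASTER);
+     }
+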
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr002
+ Basic repmgr election test.
+
+ Open three clients of different priorities and make sure repmgr
+ elects expected master. Shut master down, make sure repmgr elects
+ expected remaining client master, make sure former master can join
+ as client.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr003
+ Basic repmgr internal init test.
+
+ Start an appointed master site and two clients, processing
+ transactions between each additional site. Verify all expected
+ transactions are replicated.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr004
+ Basic repmgr test with in-memory logs.
+
+ Create an appointed master and two clients, process some records and
+ verify resulting databases.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr005
+ Basic repmgr test with in-memory databases.
+
+ Create an appointed master and two clients, process some records and
+ verify resulting databases.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr006
+ Basic repmgr test with bulk processing.
+
+ Create an appointed master and two clients, process some records and
+ verify resulting databases.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr007
+ Basic repmgr client shutdown/restart test.
+
+ Start an appointed master site and two clients. Shutdown and
+ restart each client, processing transactions after each restart.
+ Verify all expected transactions are replicated.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr008
+ Basic repmgr test with client-to-client configuration.
+
+ Create a master and two clients, process some records and verify
+ resulting databases.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr009
+ repmgr API error test.
+
+ Try a variety of repmgr calls that result in errors. Also
+ try combinations of repmgr and base replication API calls
+ that result in errors.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr010
+ Acknowledgement policy and timeout test.
+
+ Verify that "quorum" acknowledgement policy succeeds with fewer than
+ nsites running. Verify that "all" acknowledgement policy results in
+ ack failures with fewer than nsites running.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr011
+ repmgr two site strict majority test.
+
+ Start an appointed master and one client with 2 site strict
+ majority set. Shut down the master site, wait and verify that
+ the client site was not elected master. Start up master site
+ and verify that transactions are processed as expected.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr012
+ repmgr heartbeat test.
+
+ Start an appointed master and one client site. Set heartbeat
+ send and monitor values and process some transactions. Stop
+ sending heartbeats from master and verify that client sees
+ a dropped connection.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr013
+ Site list test.
+
+ Configure a master and two clients where one client is a peer of
+ the other and verify resulting site lists.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr014
+ Basic repmgr in-memory test.
+
+ Create an appointed master and two clients, process some records and
+ verify resulting databases. Put databases, logs and replication files
+ in-memory.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr015
+ Basic repmgr in-memory election test.
+
+ Open three clients of different priorities and make sure repmgr
+ elects expected master. Shut master down, make sure repmgr elects
+ expected remaining client master, make sure former master can join
+ as client. Replication files are in-memory; databases, logs and
+ environment regions are on-disk.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr016
+ Basic repmgr in-memory internal init test.
+
+ Start an appointed master site and two clients, processing
+ transactions between each additional site. Verify all expected
+ transactions are replicated. Replication files are in-memory;
+ databases, logs and environment regions are on-disk.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr017
+ repmgr in-memory cache overflow test.
+
+ Start an appointed master site and one client, putting databases,
+ environment regions, logs and replication files in-memory. Set
+ very small cachesize and run enough transactions to overflow cache.
+ Shut down and restart master and client, giving master a larger cache.
+ Run and verify a small number of transactions.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr018
+ Check repmgr stats.
+
+ Start an appointed master and one client. Shut down the client,
+ run some transactions at the master and verify that there are
+ acknowledgement failures and one dropped connection. Shut down
+ and restart client again and verify that there are two dropped
+ connections.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr019
+ Basic repmgr test with in-memory databases and in-memory logs.
+
+ Create an appointed master and two clients, process some records and
+ verify resulting databases.
+
+ Run for btree only because access method shouldn't matter.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr022
+ Basic test of repmgr's multi-process master support.
+
+ Set up a simple 2-site group, create data and replicate it.
+ Add a second process at the master and have it write some
+ updates. It does not explicitly start repmgr (nor do any
+ replication configuration, for that matter). Its first
+ update triggers initiation of connections, and so it doesn't
+ get to the client without a log request. But later updates
+ should go directly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr023
+ Repmgr support for multi-process master.
+
+ Start two processes at the master.
+ Add a client site (not previously known to the master
+ processes), and make sure
+ both master processes connect to it.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr024
+ Ensuring exactly one listener process.
+
+ Start a repmgr process with a listener.
+ Start a second process, and see that it does not become the listener.
+ Shut down the first process (gracefully). Now a second process should
+ become the listener.
+ Kill the listener process abruptly. Running failchk should show that
+ recovery is necessary. Run recovery and start a clean listener.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr025
+ Repmgr site address discovery via handshakes.
+
+ Master with 2 processes, does not know client's address.
+ Client processes start in either order, connect to master.
+ Master learns of client's address via handshake message.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr026
+ Repmgr site address discovery via NEWSITE
+
+ New client, previously unknown to (main) master process, connects to an
+ existing client, which broadcasts NEWSITE message.
+ This causes the master to discover its address and connect to it.
+ Other new clients may also have been added to master's configuration in
+ the interim (via a subordinate master process).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr027
+ Repmgr recognition of peer setting, across processes.
+
+ Set up a master and two clients, synchronized with some data.
+ Add a new client, configured to use c2c sync with one of the original
+ clients. Check stats to make sure the correct c2c peer was used.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr028
+ Simple smoke test for repmgr elections with multi-process envs.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr029
+ Repmgr combined with replication-unaware process at master.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr030
+ Subordinate connections and processes should not trigger elections.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr031
+ Test repmgr's internal juggling of peer EIDs.
+
+ Set up master and 2 clients, A and B.
+ Add a third client (C), with two processes.
+ The first process will be configured to know about A.
+ The second process will know about B, and set that as peer,
+ but when it joins the env, site B will have to be shuffled
+ into a later position in the list, because A is already first.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+repmgr032
+ Multi-process repmgr start-up policies.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc001
+ Test RPC server timeouts for cursor, txn and env handles.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc002
+ Test invalid RPC functions and make sure we return errors correctly.
+ Test server home directory error cases.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc003
+ Test RPC and secondary indices.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc004
+ Test RPC server and security
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc005
+ Test RPC server handle ID sharing
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc006
+ Test RPC server and multiple operations to server.
+ Make sure the server doesn't deadlock itself, but
+ returns DEADLOCK to the client.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc001
+ Recno backing file test. Try different patterns of adding
+ records and making sure that the corresponding file matches.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc002
+ Recno backing file test #2: test of set_re_delim. Specify a backing
+ file with colon-delimited records, and make sure they are correctly
+ interpreted.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc003
+ Recno backing file test. Try different patterns of adding
+ records and making sure that the corresponding file matches.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc004
+ Recno backing file test for EOF-terminated records.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+scr###
+ The scr### directories are shell scripts that test a variety of
+ things, including things about the distribution itself. These
+ tests won't run on most systems, so don't even try to run them.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb001
+ Tests mixing db and subdb operations
+ Create a db, add data, try to create a subdb.
+ Test naming db and subdb with a leading - for correct parsing
+ Existence check -- test use of -excl with subdbs
+
+ Test non-subdb and subdb operations
+ Test naming (filenames begin with -)
+ Test existence (cannot create subdb of same name with -excl)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb002
+ Tests basic subdb functionality
+ Small keys, small data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+ Then repeat using an environment.
+
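+ For orientation, opening a subdatabase simply means supplying a
+ database name in addition to the file name; a minimal C API sketch
+ follows ("subdb002.db" and "sub1" are placeholders, error handling
+ omitted).
+
+     #include <db.h>
+
+     int
+     main(void)
+     {
+         DB *db;
+
+         (void)db_create(&db, NULL, 0);
+         /* "subdb002.db" is the physical file, "sub1" the subdb. */
+         (void)db->open(db, NULL,
+             "subdb002.db", "sub1", DB_BTREE, DB_CREATE, 0644);
+         return (db->close(db, 0));
+     }
+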
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb003
+ Tests many subdbs
+ Creates many subdbs and puts a small amount of
+ data in each (many defaults to 1000)
+
+ Use the first 1000 entries from the dictionary as subdbnames.
+ Insert each with entry as name of subdatabase and a partial list
+ as key/data. After all are entered, retrieve all; compare output
+ to original. Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb004
+ Tests large subdb names
+ subdb name = filecontents,
+ key = filename, data = filecontents
+ Put/get per key
+ Dump file
+ Dump subdbs, verify data and subdb name match
+
+ Create 1 db with many large subdbs. Use the contents as subdb names.
+ Take the source files and dbtest executable and enter their names as
+ the key with their contents as data. After all are entered, retrieve
+ all; compare output to original. Close file, reopen, do retrieve and
+ re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb005
+ Tests cursor operations in subdbs
+ Put/get per key
+ Verify cursor operations work within subdb
+ Verify cursor operations do not work across subdbs
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb006
+ Tests intra-subdb join
+
+ We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
+ everything else does as well. We'll create test databases called
+ sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
+ describes the duplication -- duplicates are of the form 0, N, 2N, 3N,
+ ... where N is the number of the database. Primary.db is the primary
+ database, and sub0.db is the database that has no matching duplicates.
+ All of these are within a single database.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb007
+ Tests page size difference errors between subdbs.
+ If the physical file already exists, we ignore pagesize specifications
+ on any subsequent -creates.
+
+ 1. Create/open a subdb with system default page size.
+ Create/open a second subdb specifying a different page size.
+ The create should succeed, but the pagesize of the new db
+ will be the system default page size.
+ 2. Create/open a subdb with a specified, non-default page size.
+ Create/open a second subdb specifying a different page size.
+ The create should succeed, but the pagesize of the new db
+ will be the specified page size from the first create.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb008
+ Tests explicit setting of lorders for subdatabases -- the
+ lorder should be ignored.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb009
+ Test DB->rename() method for subdbs
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb010
+ Test DB->remove() method and DB->truncate() for subdbs
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb011
+ Test deleting Subdbs with overflow pages
+ Create 1 db with many large subdbs.
+ Test subdatabases with overflow pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb012
+ Test subdbs with locking and transactions
+ Tests that creating and removing subdbs while handles
+ are open works correctly, including in the face of txns.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb013
+ Tests in-memory subdatabases.
+ Create an in-memory subdb. Test for persistence after
+ overflowing the cache. Test for conflicts when we have
+ two in-memory files.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb014
+ Tests mixing in-memory named and in-memory unnamed dbs.
+ Create a regular in-memory db, add data.
+ Create a named in-memory db.
+ Try to create the same named in-memory db again (should fail).
+ Try to create a different named in-memory db (should succeed).
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb015
+ Tests basic in-memory named database functionality
+ Small keys, small data
+ Put/get per key
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+ Then repeat using an environment.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb016
+ Creates many in-memory named dbs and puts a small amount of
+ data in each (many defaults to 100)
+
+ Use the first 100 entries from the dictionary as names.
+ Insert each with entry as name of subdatabase and a partial list
+ as key/data. After all are entered, retrieve all; compare output
+ to original.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb017
+ Test DB->rename() for in-memory named databases.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb018
+ Tests join of in-memory named databases.
+
+ We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
+ everything else does as well. We'll create test databases called
+ sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
+ describes the duplication -- duplicates are of the form 0, N, 2N, 3N,
+ ... where N is the number of the database. Primary.db is the primary
+ database, and sub0.db is the database that has no matching duplicates.
+ All of these are within a single database.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb019
+ Tests in-memory subdatabases.
+ Create an in-memory subdb. Test for persistence after
+ overflowing the cache. Test for conflicts when we have
+ two in-memory files.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdb020
+ Tests in-memory subdatabases.
+ Create an in-memory subdb with one page size. Close, and
+ open with a different page size: should fail.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdbtest001
+ Tests multiple access methods in one subdb
+ Open several subdbs, each with a different access method
+ Small keys, small data
+ Put/get per key per subdb
+ Dump file, verify per subdb
+ Close, reopen per subdb
+ Dump file, verify per subdb
+
+ Make several subdbs of different access methods all in one DB.
+ Rotate methods and repeat [#762].
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdbtest002
+ Tests multiple access methods in one subdb, accessed by multiple
+ processes.
+ Open several subdbs, each with a different access method
+ Small keys, small data
+ Put/get per key per subdb
+ Fork off several child procs to each delete selected
+ data from their subdb and then exit
+ Dump file, verify contents of each subdb is correct
+ Close, reopen per subdb
+ Dump file, verify per subdb
+
+ Make several subdbs of different access methods all in one DB.
+ Fork off some child procs to each manipulate one subdb and when
+ they are finished, verify the contents of the databases.
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sec001
+ Test of security interface
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sec002
+ Test of security interface and catching errors in the
+ face of attackers overwriting parts of existing files.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+si001
+ Secondary index put/delete with lorder test
+
+ Put data in primary db and check that pget on secondary
+ index finds the right entries. Alter the primary in the
+ following ways, checking for correct data each time:
+ Overwrite data in primary database.
+ Delete half of entries through primary.
+ Delete half of remaining entries through secondary.
+ Append data (for record-based primaries only).
+
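+ A sketch of the associate/pget pattern the si* tests exercise, in
+ the C API; the key-extractor callback and the probe key are
+ illustrative only, and in the real tests the secondary key is
+ derived from the primary data. Error handling is omitted.
+
+     #include <string.h>
+     #include <db.h>
+
+     /* Secondary key extractor: use the primary datum as the key. */
+     static int
+     skey_cb(DB *secondary, const DBT *pkey, const DBT *pdata, DBT *skey)
+     {
+         memset(skey, 0, sizeof(*skey));
+         skey->data = pdata->data;
+         skey->size = pdata->size;
+         return (0);
+     }
+
+     void
+     setup_secondary(DB *primary, DB *secondary)
+     {
+         DBT skey, pkey, data;
+
+         /* Tie the secondary to the primary; updates now propagate. */
+         (void)primary->associate(primary, NULL, secondary, skey_cb, 0);
+
+         /* pget returns the matching primary key along with the data. */
+         memset(&skey, 0, sizeof(skey));
+         memset(&pkey, 0, sizeof(pkey));
+         memset(&data, 0, sizeof(data));
+         skey.data = "some-data"; skey.size = sizeof("some-data");
+         (void)secondary->pget(secondary, NULL, &skey, &pkey, &data, 0);
+     }
+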
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+si002
+ Basic cursor-based secondary index put/delete test
+
+ Cursor put data in primary db and check that pget
+ on secondary index finds the right entries.
+ Open and use a second cursor to exercise the cursor
+ comparison API on secondaries.
+ Overwrite while walking primary, check pget again.
+ Overwrite while walking secondary (use c_pget), check
+ pget again.
+ Cursor delete half of entries through primary, check.
+ Cursor delete half of remainder through secondary, check.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+si003
+ si001 with secondaries created and closed mid-test
+ Basic secondary index put/delete test with secondaries
+ created mid-test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+si004
+ si002 with secondaries created and closed mid-test
+ Basic cursor-based secondary index put/delete test, with
+ secondaries created mid-test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+si005
+ Basic secondary index put/delete test with transactions
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+si006
+
+ Test -immutable_key interface.
+
+ DB_IMMUTABLE_KEY is an optimization to be used when a
+ secondary key will not be changed. It does not prevent
+ a deliberate change to the secondary key, it just does not
+ propagate that change when it is made to the primary.
+ This test verifies that a change to the primary is propagated
+ to the secondary or not as specified by -immutable_key.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+si007
+ Secondary index put/delete with lorder test
+
+ This test is the same as si001 with the exception
+ that we create and populate the primary and THEN
+ create the secondaries and associate them with -create.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+si008
+ Secondary index put/delete with lorder test
+
+ This test is the same as si001 except that we
+ create the secondaries with different byte orders:
+ one native, one swapped.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sijointest
+ Secondary index and join test.
+ This used to be si005.tcl.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test001
+ Small keys/data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test002
+ Small keys/medium data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and a fixed, medium length data string;
+ retrieve each. After all are entered, retrieve all; compare output
+ to original. Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test003
+ Small keys/large data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Take the source files and dbtest executable and enter their names
+ as the key with their contents as data. After all are entered,
+ retrieve all; compare output to original. Close file, reopen, do
+ retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test004
+ Small keys/medium data
+ Put/get per key
+ Sequential (cursor) get/delete
+
+ Check that cursor operations work. Create a database.
+ Read through the database sequentially using cursors and
+ delete each element.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test005
+ Small keys/medium data
+ Put/get per key
+ Close, reopen
+ Sequential (cursor) get/delete
+
+ Check that cursor operations work. Create a database; close
+ it and reopen it. Then read through the database sequentially
+ using cursors and delete each element.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test006
+ Small keys/medium data
+ Put/get per key
+ Keyed delete and verify
+
+ Keyed delete test.
+ Create database.
+ Go through database, deleting all entries by key.
+ Then do the same for unsorted and sorted dups.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test007
+ Small keys/medium data
+ Put/get per key
+ Close, reopen
+ Keyed delete
+
+ Check that delete operations work. Create a database; close
+	database and reopen it. Then issue a delete by key for each
+	entry. (Test006 plus reopen.)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test008
+ Small keys/large data
+ Put/get per key
+ Loop through keys by steps (which change)
+ ... delete each key at step
+ ... add each key back
+ ... change step
+ Confirm that overflow pages are getting reused
+
+ Take the source files and dbtest executable and enter their names as
+ the key with their contents as data. After all are entered, begin
+	looping through the entries, deleting some pairs and then re-adding them.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test009
+ Small keys/large data
+ Same as test008; close and reopen database
+
+ Check that we reuse overflow pages. Create database with lots of
+ big key/data pairs. Go through and delete and add keys back
+ randomly. Then close the DB and make sure that we have everything
+ we think we should.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test010
+ Duplicate test
+ Small key/data pairs.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; add duplicate records for each.
+ After all are entered, retrieve all; verify output.
+ Close file, reopen, do retrieve and re-verify.
+ This does not work for recno
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test011
+ Duplicate test
+ Small key/data pairs.
+ Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER.
+ To test off-page duplicates, run with small pagesize.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; add duplicate records for each.
+ Then do some key_first/key_last add_before, add_after operations.
+ This does not work for recno
+
+ To test if dups work when they fall off the main page, run this with
+ a very tiny page size.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test012
+ Large keys/small data
+ Same as test003 except use big keys (source files and
+ executables) and small data (the file/executable names).
+
+ Take the source files and dbtest executable and enter their contents
+ as the key with their names as data. After all are entered, retrieve
+ all; compare output to original. Close file, reopen, do retrieve and
+ re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test013
+ Partial put test
+ Overwrite entire records using partial puts.
+ Make sure that NOOVERWRITE flag works.
+
+ 1. Insert 10000 keys and retrieve them (equal key/data pairs).
+ 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error).
+ 3. Actually overwrite each one with its datum reversed.
+
+ No partial testing here.
+
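+	A minimal sketch of the NOOVERWRITE step, assuming an already-open
+	handle $db from this suite's Tcl API (the key and data values are
+	made up):
+
+	    $db put -nooverwrite "apple" "original"
+	    # A second -nooverwrite put on the same key is expected to be
+	    # rejected (a DB_KEYEXIST-style result), not to replace the data.
+	    set ret [$db put -nooverwrite "apple" "changed"]
+	    # A plain put then performs the overwrite-with-reversed-datum
+	    # step of the test.
+	    $db put "apple" "lanigiro"
+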
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test014
+ Exercise partial puts on short data
+	Run 5 combinations of the number of characters to replace
+	and the number of times to increase the size.
+
+ Partial put test, small data, replacing with same size. The data set
+ consists of the first nentries of the dictionary. We will insert them
+ (and retrieve them) as we do in test 1 (equal key/data pairs). Then
+ we'll try to perform partial puts of some characters at the beginning,
+ some at the end, and some at the middle.
+
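+	A sketch of one partial put, assuming an open $db handle; in the
+	Tcl API the -partial flag takes {doff dlen}:
+
+	    $db put "key1" "abcdefgh"
+	    # Replace the 3 bytes at offset 2 ("cde") with "XYZ"; the
+	    # record keeps the same length and becomes "abXYZfgh".
+	    $db put -partial {2 3} "key1" "XYZ"
+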
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test015
+ Partial put test
+ Partial put test where the key does not initially exist.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test016
+ Partial put test
+ Partial put where the datum gets shorter as a result of the put.
+
+ Partial put test where partial puts make the record smaller.
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and a fixed, medium length data string;
+ retrieve each. After all are entered, go back and do partial puts,
+ replacing a random-length string with the key value.
+ Then verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test017
+ Basic offpage duplicate test.
+
+ Run duplicates with small page size so that we test off page duplicates.
+ Then after we have an off-page database, test with overflow pages too.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test018
+ Offpage duplicate test
+ Key_{first,last,before,after} offpage duplicates.
+ Run duplicates with small page size so that we test off page
+ duplicates.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test019
+ Partial get test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test020
+ In-Memory database tests.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test021
+ Btree range tests.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self, reversed as key and self as data.
+ After all are entered, retrieve each using a cursor SET_RANGE, and
+ getting about 20 keys sequentially after it (in some cases we'll
+ run out towards the end of the file).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test022
+ Test of DB->getbyteswapped().
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test023
+ Duplicate test
+ Exercise deletes and cursor operations within a duplicate set.
+ Add a key with duplicates (first time on-page, second time off-page)
+ Number the dups.
+ Delete dups and make sure that CURRENT/NEXT/PREV work correctly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test024
+ Record number retrieval test.
+ Test the Btree and Record number get-by-number functionality.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test025
+ DB_APPEND flag test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test026
+ Small keys/medium data w/duplicates
+ Put/get per key.
+ Loop through keys -- delete each key
+ ... test that cursors delete duplicates correctly
+
+ Keyed delete test through cursor. If ndups is small; this will
+ test on-page dups; if it's large, it will test off-page dups.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test027
+ Off-page duplicate test
+ Test026 with parameters to force off-page duplicates.
+
+ Check that delete operations work. Create a database; close
+	database and reopen it. Then issue a delete by key for each
+ entry.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test028
+ Cursor delete test
+ Test put operations after deleting through a cursor.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test029
+ Test the Btree and Record number renumbering.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test030
+ Test DB_NEXT_DUP Functionality.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test031
+ Duplicate sorting functionality
+ Make sure DB_NODUPDATA works.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and "ndups" duplicates
+	For the data field, prepend random five-char strings (see test032)
+	so that we force the duplicate sorting code to do something.
+ Along the way, test that we cannot insert duplicate duplicates
+ using DB_NODUPDATA.
+
+ By setting ndups large, we can make this an off-page test
+ After all are entered, retrieve all; verify output.
+ Close file, reopen, do retrieve and re-verify.
+ This does not work for recno
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test032
+ DB_GET_BOTH, DB_GET_BOTH_RANGE
+
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and "ndups" duplicates. For the data field, prepend the
+ letters of the alphabet in a random order so we force the duplicate
+ sorting code to do something. By setting ndups large, we can make
+ this an off-page test. By setting overflow to be 1, we can make
+ this an overflow test.
+
+ Test the DB_GET_BOTH functionality by retrieving each dup in the file
+ explicitly. Test the DB_GET_BOTH_RANGE functionality by retrieving
+ the unique key prefix (cursor only). Finally test the failure case.
+
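+	A hedged sketch of the two retrieval modes, assuming an open $db
+	with sorted duplicates and placeholder values for $key, $datum,
+	and $prefix:
+
+	    # Exact key/data match through the db handle.
+	    set ret [$db get -get_both $key $datum]
+	    # Range (prefix) match on the data item requires a cursor.
+	    set dbc [$db cursor]
+	    set ret [$dbc get -get_both_range $key $prefix]
+	    # The failure case: a datum that is not present comes back empty.
+	    set ret [$db get -get_both $key "no-such-datum"]
+	    $dbc close
+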
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test033
+ DB_GET_BOTH without comparison function
+
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and data; add duplicate records for each. After all are
+ entered, retrieve all and verify output using DB_GET_BOTH (on DB and
+ DBC handles) and DB_GET_BOTH_RANGE (on a DBC handle) on existent and
+ nonexistent keys.
+
+ XXX
+ This does not work for rbtree.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test034
+ test032 with off-page or overflow case with non-duplicates
+ and duplicates.
+
+ DB_GET_BOTH, DB_GET_BOTH_RANGE functionality with off-page
+ or overflow case within non-duplicates and duplicates.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test035
+ Test033 with off-page duplicates
+ DB_GET_BOTH functionality with off-page duplicates.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test036
+ Test KEYFIRST and KEYLAST when the key doesn't exist
+ Put nentries key/data pairs (from the dictionary) using a cursor
+	and KEYFIRST and KEYLAST (this tests the case where we use cursor
+	put for non-existent keys).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test037
+ Test DB_RMW
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test038
+ DB_GET_BOTH, DB_GET_BOTH_RANGE on deleted items
+
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and "ndups" duplicates. For the data field, prepend the
+ letters of the alphabet in a random order so we force the duplicate
+ sorting code to do something. By setting ndups large, we can make
+ this an off-page test
+
+ Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+ each dup in the file explicitly. Then remove each duplicate and try
+ the retrieval again.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test039
+ DB_GET_BOTH/DB_GET_BOTH_RANGE on deleted items without comparison
+ function.
+
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and "ndups" duplicates. For the data field, prepend the
+ letters of the alphabet in a random order so we force the duplicate
+ sorting code to do something. By setting ndups large, we can make
+ this an off-page test.
+
+ Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+ each dup in the file explicitly. Then remove each duplicate and try
+ the retrieval again.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test040
+ Test038 with off-page duplicates
+ DB_GET_BOTH functionality with off-page duplicates.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test041
+ Test039 with off-page duplicates
+ DB_GET_BOTH functionality with off-page duplicates.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test042
+ Concurrent Data Store test (CDB)
+
+ Multiprocess DB test; verify that locking is working for the
+ concurrent access method product.
+
+ Use the first "nentries" words from the dictionary. Insert each with
+ self as key and a fixed, medium length data string. Then fire off
+ multiple processes that bang on the database. Each one should try to
+ read and write random keys. When they rewrite, they'll append their
+ pid to the data string (sometimes doing a rewrite sometimes doing a
+ partial put). Some will use cursors to traverse through a few keys
+ before finding one to write.
+
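+	For illustration, a sketch of the CDB setup the child processes
+	share; the file name is made up and the flags are the ones this
+	suite's Tcl API is assumed to use for Concurrent Data Store:
+
+	    set env [berkdb_env -create -cdb -home $testdir]
+	    set db [berkdb_open -create -btree -env $env cdb.db]
+	    # Readers proceed concurrently; CDB locking serializes writers,
+	    # which is the behavior the spawned processes bang on.
+	    $db put "somekey" "data from [pid]"
+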
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test043
+ Recno renumbering and implicit creation test
+ Test the Record number implicit creation and renumbering options.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test044
+ Small system integration tests
+ Test proper functioning of the checkpoint daemon,
+ recovery, transactions, etc.
+
+ System integration DB test: verify that locking, recovery, checkpoint,
+ and all the other utilities basically work.
+
+ The test consists of $nprocs processes operating on $nfiles files. A
+ transaction consists of adding the same key/data pair to some random
+ number of these files. We generate a bimodal distribution in key size
+ with 70% of the keys being small (1-10 characters) and the remaining
+ 30% of the keys being large (uniform distribution about mean $key_avg).
+ If we generate a key, we first check to make sure that the key is not
+ already in the dataset. If it is, we do a lookup.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test045
+ Small random tester
+ Runs a number of random add/delete/retrieve operations.
+ Tests both successful conditions and error conditions.
+
+ Run the random db tester on the specified access method.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test046
+ Overwrite test of small/big key/data with cursor checks.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test047
+ DBcursor->c_get get test with SET_RANGE option.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test048
+ Cursor stability across Btree splits.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test049
+ Cursor operations on uninitialized cursors.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test050
+ Overwrite test of small/big key/data with cursor checks for Recno.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test051
+ Fixed-length record Recno test.
+ 0. Test various flags (legal and illegal) to open
+ 1. Test partial puts where dlen != size (should fail)
+ 2. Partial puts for existent record -- replaces at beg, mid, and
+ end of record, as well as full replace
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test052
+ Renumbering record Recno test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test053
+ Test of the DB_REVSPLITOFF flag in the Btree and Btree-w-recnum
+ methods.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test054
+ Cursor maintenance during key/data deletion.
+
+ This test checks for cursor maintenance in the presence of deletes.
+	There are N different scenarios to test:
+ 1. No duplicates. Cursor A deletes a key, do a GET for the key.
+ 2. No duplicates. Cursor is positioned right before key K, Delete K,
+ do a next on the cursor.
+ 3. No duplicates. Cursor is positioned on key K, do a regular delete
+ of K, do a current get on K.
+ 4. Repeat 3 but do a next instead of current.
+ 5. Duplicates. Cursor A is on the first item of a duplicate set, A
+ does a delete. Then we do a non-cursor get.
+ 6. Duplicates. Cursor A is in a duplicate set and deletes the item.
+ do a delete of the entire Key. Test cursor current.
+ 7. Continue last test and try cursor next.
+ 8. Duplicates. Cursor A is in a duplicate set and deletes the item.
+ Cursor B is in the same duplicate set and deletes a different item.
+ Verify that the cursor is in the right place.
+	9. Cursors A and B are in the same place in the same duplicate set. A
+ deletes its item. Do current on B.
+ 10. Continue 8 and do a next on B.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test055
+ Basic cursor operations.
+ This test checks basic cursor operations.
+	There are N different scenarios to test:
+ 1. (no dups) Set cursor, retrieve current.
+ 2. (no dups) Set cursor, retrieve next.
+ 3. (no dups) Set cursor, retrieve prev.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test056
+ Cursor maintenance during deletes.
+ Check if deleting a key when a cursor is on a duplicate of that
+ key works.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test057
+ Cursor maintenance during key deletes.
+ 1. Delete a key with a cursor. Add the key back with a regular
+ put. Make sure the cursor can't get the new item.
+ 2. Put two cursors on one item. Delete through one cursor,
+ check that the other sees the change.
+ 3. Same as 2, with the two cursors on a duplicate.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test058
+ Verify that deleting and reading duplicates results in correct ordering.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test059
+ Cursor ops work with a partial length of 0.
+ Make sure that we handle retrieves of zero-length data items correctly.
+	The following ops should allow a partial data retrieve of 0-length.
+ db_get
+ db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
+
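+	A minimal sketch of a zero-length partial get, assuming an open
+	$db handle:
+
+	    $db put "key1" "abcdef"
+	    # Ask for 0 bytes starting at offset 0; the get is expected to
+	    # succeed and return an empty data element rather than an error.
+	    set ret [$db get -partial {0 0} "key1"]
+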
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test060
+ Test of the DB_EXCL flag to DB->open().
+ 1) Attempt to open and create a nonexistent database; verify success.
+ 2) Attempt to reopen it; verify failure.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test061
+ Test of txn abort and commit for in-memory databases.
+ a) Put + abort: verify absence of data
+ b) Put + commit: verify presence of data
+ c) Overwrite + abort: verify that data is unchanged
+ d) Overwrite + commit: verify that data has changed
+ e) Delete + abort: verify that data is still present
+ f) Delete + commit: verify that data has been deleted
+
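+	A sketch of cases (a) and (b), assuming a transactional $env and a
+	$db opened with -auto_commit (key/data values are placeholders):
+
+	    set t [$env txn]
+	    $db put -txn $t "key1" "data1"
+	    $t abort
+	    # (a) After the abort, "key1" should not be found.
+	    set ret [$db get "key1"]
+
+	    set t [$env txn]
+	    $db put -txn $t "key1" "data1"
+	    $t commit
+	    # (b) After the commit, the get should return "data1".
+	    set ret [$db get "key1"]
+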
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test062
+ Test of partial puts (using DB_CURRENT) onto duplicate pages.
+ Insert the first 200 words into the dictionary 200 times each with
+ self as key and <random letter>:self as data. Use partial puts to
+ append self again to data; verify correctness.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test063
+ Test of the DB_RDONLY flag to DB->open
+ Attempt to both DB->put and DBC->c_put into a database
+ that has been opened DB_RDONLY, and check for failure.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test064
+ Test of DB->get_type
+ Create a database of type specified by method.
+ Make sure DB->get_type returns the right thing with both a normal
+ and DB_UNKNOWN open.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test065
+ Test of DB->stat, both -DB_FAST_STAT and row
+ counts with DB->stat -txn.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test066
+ Test of cursor overwrites of DB_CURRENT w/ duplicates.
+
+ Make sure a cursor put to DB_CURRENT acts as an overwrite in a
+ database with duplicates.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test067
+ Test of DB_CURRENT partial puts onto almost empty duplicate
+ pages, with and without DB_DUP_SORT.
+
+ Test of DB_CURRENT partial puts on almost-empty duplicate pages.
+ This test was written to address the following issue, #2 in the
+ list of issues relating to bug #0820:
+
+ 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
+ In Btree, the DB_CURRENT overwrite of off-page duplicate records
+ first deletes the record and then puts the new one -- this could
+ be a problem if the removal of the record causes a reverse split.
+ Suggested solution is to acquire a cursor to lock down the current
+ record, put a new record after that record, and then delete using
+ the held cursor.
+
+ It also tests the following, #5 in the same list of issues:
+ 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL
+ set, duplicate comparison routine specified.
+ The partial change does not change how data items sort, but the
+ record to be put isn't built yet, and that record supplied is the
+ one that's checked for ordering compatibility.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test068
+ Test of DB_BEFORE and DB_AFTER with partial puts.
+ Make sure DB_BEFORE and DB_AFTER work properly with partial puts, and
+ check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test069
+ Test of DB_CURRENT partial puts without duplicates-- test067 w/
+ small ndups to ensure that partial puts to DB_CURRENT work
+ correctly in the absence of duplicate pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test070
+ Test of DB_CONSUME (Four consumers, 1000 items.)
+
+ Fork off six processes, four consumers and two producers.
+ The producers will each put 20000 records into a queue;
+ the consumers will each get 10000.
+ Then, verify that no record was lost or retrieved twice.
+
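+	A minimal sketch of the queue operations involved, matching the
+	flags used by conscript.tcl later in this directory (the file name
+	and record length are made up, and $env is assumed open):
+
+	    set db [berkdb_open -create -env $env -queue -len 32 q.db]
+	    # Producer side: -append assigns the next record number.
+	    set recno [$db put -append "work item"]
+	    # Consumer side: -consume atomically removes and returns the
+	    # head of the queue as a {recno data} pair.
+	    set ret [$db get -consume]
+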
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test071
+ Test of DB_CONSUME (One consumer, 10000 items.)
+	This is DB Test 70, with one consumer, one producer, and 10000 items.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test072
+ Test of cursor stability when duplicates are moved off-page.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test073
+ Test of cursor stability on duplicate pages.
+
+ Does the following:
+ a. Initialize things by DB->putting ndups dups and
+ setting a reference cursor to point to each.
+ b. c_put ndups dups (and correspondingly expanding
+ the set of reference cursors) after the last one, making sure
+ after each step that all the reference cursors still point to
+ the right item.
+ c. Ditto, but before the first one.
+ d. Ditto, but after each one in sequence first to last.
+	e. Ditto, but after each one in sequence from last to first
+	   (in both sequence cases, verifying that the reference cursor
+	   adjustments occur relative to the new datum).
+ f. Ditto for the two sequence tests, only doing a
+ DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+ new one.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test074
+ Test of DB_NEXT_NODUP.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test076
+ Test creation of many small databases in a single environment. [#1528].
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test077
+ Test of DB_GET_RECNO [#1206].
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test078
+ Test of DBC->c_count(). [#303]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test079
+ Test of deletes in large trees. (test006 w/ sm. pagesize).
+
+ Check that delete operations work in large btrees. 10000 entries
+ and a pagesize of 512 push this out to a four-level btree, with a
+ small fraction of the entries going on overflow pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test081
+ Test off-page duplicates and overflow pages together with
+ very large keys (key/data as file contents).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test082
+ Test of DB_PREV_NODUP (uses test074).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test083
+ Test of DB->key_range.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test084
+ Basic sanity test (test001) with large (64K) pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test085
+ Test of cursor behavior when a cursor is pointing to a deleted
+ btree key which then has duplicates added. [#2473]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test086
+ Test of cursor stability across btree splits/rsplits with
+ subtransaction aborts (a variant of test048). [#2373]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test087
+ Test of cursor stability when converting to and modifying
+ off-page duplicate pages with subtransaction aborts. [#2373]
+
+ Does the following:
+ a. Initialize things by DB->putting ndups dups and
+ setting a reference cursor to point to each. Do each put twice,
+ first aborting, then committing, so we're sure to abort the move
+ to off-page dups at some point.
+ b. c_put ndups dups (and correspondingly expanding
+ the set of reference cursors) after the last one, making sure
+ after each step that all the reference cursors still point to
+ the right item.
+ c. Ditto, but before the first one.
+ d. Ditto, but after each one in sequence first to last.
+	e. Ditto, but after each one in sequence from last to first
+	   (in both sequence cases, verifying that the reference cursor
+	   adjustments occur relative to the new datum).
+ f. Ditto for the two sequence tests, only doing a
+ DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+ new one.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test088
+ Test of cursor stability across btree splits with very
+ deep trees (a variant of test048). [#2514]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test089
+ Concurrent Data Store test (CDB)
+
+ Enhanced CDB testing to test off-page dups, cursor dups and
+ cursor operations like c_del then c_get.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test090
+ Test for functionality near the end of the queue using test001.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test091
+ Test of DB_CONSUME_WAIT.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test092
+ Test of DB_DIRTY_READ [#3395]
+
+ We set up a database with nentries in it. We then open the
+	database read-only twice, once with dirty reads and once without.
+ We open the database for writing and update some entries in it.
+ Then read those new entries via db->get (clean and dirty), and
+ via cursors (clean and dirty).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test093
+ Test set_bt_compare (btree key comparison function) and
+ set_h_compare (hash key comparison function).
+
+ Open a database with a comparison function specified,
+ populate, and close, saving a list with that key order as
+ we do so. Reopen and read in the keys, saving in another
+ list; the keys should be in the order specified by the
+ comparison function. Sort the original saved list of keys
+ using the comparison function, and verify that it matches
+ the keys as read out of the database.
+
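+	For illustration, a sketch of a custom btree comparison, assuming
+	the Tcl API accepts the callback through a -btcompare open flag:
+
+	    proc reverse_cmp { a b } {
+	        # Sort keys in reverse lexicographic order; the callback
+	        # must return <0, 0, or >0 like string compare does.
+	        return [string compare $b $a]
+	    }
+	    set db [berkdb_open -create -btree -btcompare reverse_cmp keys.db]
+	    $db put "apple" "1"
+	    $db put "zebra" "2"
+	    # A cursor walk should now return "zebra" before "apple".
+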
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test094
+ Test using set_dup_compare.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test095
+ Bulk get test for methods supporting dups. [#2934]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test096
+ Db->truncate test.
+ For all methods:
+ Test that truncate empties an existing database.
+ Test that truncate-write in an aborted txn doesn't
+ change the original contents.
+ Test that truncate-write in a committed txn does
+ overwrite the original contents.
+ For btree and hash, do the same in a database with offpage dups.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test097
+ Open up a large set of database files simultaneously.
+ Adjust for local file descriptor resource limits.
+ Then use the first 1000 entries from the dictionary.
+ Insert each with self as key and a fixed, medium length data string;
+ retrieve each. After all are entered, retrieve all; compare output
+ to original.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test098
+ Test of DB_GET_RECNO and secondary indices. Open a primary and
+ a secondary, and do a normal cursor get followed by a get_recno.
+ (This is a smoke test for "Bug #1" in [#5811].)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test099
+
+ Test of DB->get and DBC->c_get with set_recno and get_recno.
+
+ Populate a small btree -recnum database.
+ After all are entered, retrieve each using -recno with DB->get.
+ Open a cursor and do the same for DBC->c_get with set_recno.
+ Verify that set_recno sets the record number position properly.
+ Verify that get_recno returns the correct record numbers.
+
+ Using the same database, open 3 cursors and position one at
+ the beginning, one in the middle, and one at the end. Delete
+ by cursor and check that record renumbering is done properly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test100
+ Test for functionality near the end of the queue
+ using test025 (DB_APPEND).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test101
+ Test for functionality near the end of the queue
+ using test070 (DB_CONSUME).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test102
+ Bulk get test for record-based methods. [#2934]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test103
+ Test bulk get when record numbers wrap around.
+
+ Load database with items starting before and ending after
+ the record number wrap around point. Run bulk gets (-multi_key)
+ with various buffer sizes and verify the contents returned match
+ the results from a regular cursor get.
+
+ Then delete items to create a sparse database and make sure it
+ still works. Test both -multi and -multi_key since they behave
+ differently.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test106
+
+
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test107
+ Test of read-committed (degree 2 isolation). [#8689]
+
+ We set up a database. Open a read-committed transactional cursor and
+ a regular transactional cursor on it. Position each cursor on one page,
+ and do a put to a different page.
+
+ Make sure that:
+ - the put succeeds if we are using degree 2 isolation.
+ - the put deadlocks within a regular transaction with
+ a regular cursor.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test109
+
+ Test of sequences.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test110
+ Partial get test with duplicates.
+
+ For hash and btree, create and populate a database
+ with dups. Randomly selecting offset and length,
+ retrieve data from each record and make sure we
+ get what we expect.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test111
+ Test database compaction.
+
+ Populate a database. Remove a high proportion of entries.
+ Dump and save contents. Compact the database, dump again,
+ and make sure we still have the same contents.
+ Add back some entries, delete more entries (this time by
+ cursor), dump, compact, and do the before/after check again.
+
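+	The compaction call itself, sketched under the assumption that the
+	Tcl API exposes it as a db method with a -freespace option:
+
+	    # Compact after the deletes and ask for freed pages to be
+	    # returned to the filesystem; the dump taken before this call
+	    # should match a dump taken afterwards.
+	    set ret [$db compact -freespace]
+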
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test112
+ Test database compaction with a deep tree.
+
+ This is a lot like test111, but with a large number of
+ entries and a small page size to make the tree deep.
+ To make it simple we use numerical keys all the time.
+
+ Dump and save contents. Compact the database, dump again,
+ and make sure we still have the same contents.
+ Add back some entries, delete more entries (this time by
+ cursor), dump, compact, and do the before/after check again.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test113
+ Test database compaction with duplicates.
+
+ This is essentially test111 with duplicates.
+ To make it simple we use numerical keys all the time.
+
+ Dump and save contents. Compact the database, dump again,
+ and make sure we still have the same contents.
+ Add back some entries, delete more entries (this time by
+ cursor), dump, compact, and do the before/after check again.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test114
+ Test database compaction with overflows.
+
+ Populate a database. Remove a high proportion of entries.
+ Dump and save contents. Compact the database, dump again,
+ and make sure we still have the same contents.
+ Add back some entries, delete more entries (this time by
+ cursor), dump, compact, and do the before/after check again.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test115
+ Test database compaction with user-specified btree sort.
+
+ This is essentially test111 with the user-specified sort.
+ Populate a database. Remove a high proportion of entries.
+ Dump and save contents. Compact the database, dump again,
+ and make sure we still have the same contents.
+ Add back some entries, delete more entries (this time by
+ cursor), dump, compact, and do the before/after check again.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test116
+ Test of basic functionality of lsn_reset.
+
+ Create a database in an env. Copy it to a new file within
+ the same env. Reset the page LSNs.
+
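+	A hedged sketch of the copy-and-reset step; the assumption is that
+	lsn_reset is exposed as an environment method taking the file name:
+
+	    # Copy the database file within the environment, then clear the
+	    # page LSNs so the copy is usable elsewhere.
+	    file copy -force $testdir/orig.db $testdir/copy.db
+	    $env lsn_reset copy.db
+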
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test117
+ Test database compaction with requested fill percent.
+
+ Populate a database. Remove a high proportion of entries.
+ Dump and save contents. Compact the database, requesting
+ fill percentages starting at 10% and working our way up to
+ 100. On each cycle, make sure we still have the same contents.
+
+ Unlike the other compaction tests, this one does not
+ use -freespace.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test119
+ Test behavior when Berkeley DB returns DB_BUFFER_SMALL on a cursor.
+
+ If the user-supplied buffer is not large enough to contain
+	the returned value, DB returns DB_BUFFER_SMALL. When that happens,
+ check that the cursor does not move -- if it moves, it will
+ skip items. [#13815]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test120
+ Test of multi-version concurrency control.
+
+ Test basic functionality: a snapshot transaction started
+ before a regular transaction's put can't see the modification.
+ A snapshot transaction started after the put can see it.
+
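+	A sketch of the snapshot-before-put case, assuming a transactional
+	$env and that the Tcl API uses -multiversion on open and -snapshot
+	on txn begin:
+
+	    set db [berkdb_open -create -btree -auto_commit \
+	        -multiversion -env $env mvcc.db]
+	    set reader [$env txn -snapshot]
+	    set writer [$env txn]
+	    $db put -txn $writer "key1" "new"
+	    $writer commit
+	    # The snapshot reader still sees the state as of its start,
+	    # so this get should not observe the writer's update.
+	    set ret [$db get -txn $reader "key1"]
+	    $reader commit
+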
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test121
+ Tests of multi-version concurrency control.
+
+ MVCC and cursor adjustment.
+ Set up a -snapshot cursor and position it in the middle
+ of a database.
+ Write to the database, both before and after the cursor,
+ and verify that it stays on the same position.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test122
+ Tests of multi-version concurrency control.
+
+ MVCC and databases that turn multi-version on and off.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test123
+ Concurrent Data Store cdsgroup smoke test.
+
+ Open a CDS env with -cdb_alldb.
+ Start a "txn" with -cdsgroup.
+ Create two databases in the env, do a cursor put
+ in both within the same txn. This should succeed.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test125
+ Test cursor comparison API.
+
+ The cursor comparison API reports whether two cursors within
+ the same database are at the same position. It does not report
+ any information about relative position.
+
+ 1. Test two uninitialized cursors (error).
+ 2. Test one uninitialized cursor, one initialized (error).
+ 3. Test two cursors in different databases (error).
+ 4. Put two cursors in the same place, test for match. Walk
+ them back and forth a bit, more matching.
+ 5. Two cursors in the same spot. Delete through one.
+
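+	A sketch of case 4, assuming the comparison is exposed as a cursor
+	"cmp" method returning 0 when both cursors reference the same
+	position:
+
+	    set dbc1 [$db cursor]
+	    set dbc2 [$db cursor]
+	    $dbc1 get -first
+	    $dbc2 get -first
+	    # Expect a match here; nothing is implied about relative order.
+	    set same [$dbc1 cmp $dbc2]
+	    $dbc2 get -next
+	    # Now the cursors are at different positions, so no match.
+	    set same [$dbc1 cmp $dbc2]
+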
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn001
+ Begin, commit, abort testing.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn002
+ Verify that read-only transactions do not write log records.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn003
+ Test abort/commit/prepare of txns with outstanding child txns.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn004
+ Test of wraparound txnids (txn001)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn005
+ Test transaction ID wraparound and recovery.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn008
+ Test of wraparound txnids (txn002)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn009
+ Test of wraparound txnids (txn003)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn010
+ Test DB_ENV->txn_checkpoint arguments/flags
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn011
+ Test durable and non-durable txns.
+ Test a mixed env (with both durable and non-durable
+ dbs), then a purely non-durable env. Make sure commit
+ and abort work, and that only the log records we
+ expect are written.
+ Test that we can't get a durable handle on an open ND
+ database, or vice versa. Test that all subdb's
+ must be of the same type (D or ND).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn012
+ Test txn->getname and txn->setname.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn013
+ Test of txns used in the wrong environment.
+ Set up two envs. Start a txn in one env, and attempt to use it
+ in the other env. Verify we get the appropriate error message.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn014
+ Test of parent and child txns working on the same database.
+	A txn that will become a parent creates a database.
+ A txn that will not become a parent creates another database.
+ Start a child txn of the 1st txn.
+ Verify that the parent txn is disabled while child is open.
+ 1. Child reads contents with child handle (should succeed).
+ 2. Child reads contents with parent handle (should succeed).
+ Verify that the non-parent txn can read from its database,
+ and that the child txn cannot.
+ Return to the child txn.
+ 3. Child writes with child handle (should succeed).
+ 4. Child writes with parent handle (should succeed).
+
+ Commit the child, verify that the parent can write again.
+ Check contents of database with a second child.
diff --git a/db-4.8.30/test/archive.tcl b/db-4.8.30/test/archive.tcl
new file mode 100644
index 0000000..f54491a
--- /dev/null
+++ b/db-4.8.30/test/archive.tcl
@@ -0,0 +1,255 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Options are:
+# -checkrec <checkpoint frequency>
+# -dir <dbhome directory>
+# -maxfilesize <maxsize of log file>
+proc archive { { inmem 0 } args } {
+ global alphabet
+ source ./include.tcl
+
+ # Set defaults
+ if { $inmem == 1 } {
+ set maxbsize [expr 8 * [expr 1024 * 1024]]
+ set desc "in-memory"
+ } else {
+ set maxbsize [expr 8 * 1024]
+ set desc "on-disk"
+ }
+ set maxfile [expr 32 * 1024]
+ set checkrec 500
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -c.* { incr i; set checkrec [lindex $args $i] }
+ -d.* { incr i; set testdir [lindex $args $i] }
+ -m.* { incr i; set maxfile [lindex $args $i] }
+ default {
+ puts "FAIL:[timestamp] archive usage"
+ puts "usage: archive -checkrec <checkpt freq> \
+ -dir <directory> -maxfilesize <max size of log files>"
+ return
+ }
+ }
+ }
+
+ # Clean out old log if it existed
+ puts "Archive: Log archive test (using $desc logging)."
+ puts "Unlinking log: error message OK"
+ env_cleanup $testdir
+
+ # Now run the various functionality tests
+ if { $inmem == 0 } {
+ set eflags "-create -txn -home $testdir \
+ -log_buffer $maxbsize -log_max $maxfile"
+ } else {
+ set eflags "-create -txn -home $testdir -log_inmemory \
+ -log_buffer $maxbsize -log_max $maxfile"
+ }
+ set dbenv [eval {berkdb_env} $eflags]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set logc [$dbenv log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $dbenv] TRUE
+
+ # The basic test structure here is that we write a lot of log
+ # records (enough to fill up 100 log files; each log file it
+ # small). We start with three txns and open a database in
+ # each transaction. Then, in a loop, we take periodic
+ # checkpoints. Between each pair of checkpoints, we end one
+ # transaction; when no transactions are left, we start up three
+ # new ones, letting them overlap checkpoints as well.
+ #
+ # The pattern that we create is:
+ # 1. Create TXN1, TXN2, TXN3 and open dbs within the txns.
+ # 2. Write a bunch of additional log records.
+ # 3. Checkpoint.
+ # 4. Archive, checking that we list the right files.
+ # 5. Commit one transaction.
+ # 6. If no txns left, start 3 new ones.
+ # 7. Until we've gone through enough records, return to step 2.
+
+ set baserec "1:$alphabet:2:$alphabet:3:$alphabet:4:$alphabet"
+ puts "\tArchive.a: Writing log records; checkpoint every $checkrec records"
+ set nrecs $maxfile
+ set rec 0:$baserec
+
+ # Begin 1st transaction and record current log file. Open
+ # a database in the transaction; the log file won't be
+ # removable until the transaction is aborted or committed.
+ set t1 [$dbenv txn]
+ error_check_good t1:txn_begin [is_valid_txn $t1 $dbenv] TRUE
+
+ set l1 [lindex [lindex [$logc get -last] 0] 0]
+ set lsnlist [list $l1]
+
+ set tdb1 [eval {berkdb_open -create -mode 0644} \
+ -env $dbenv -txn $t1 -btree tdb1.db]
+ error_check_good dbopen [is_valid_db $tdb1] TRUE
+
+ # Do the same for a 2nd and 3rd transaction.
+ set t2 [$dbenv txn]
+ error_check_good t2:txn_begin [is_valid_txn $t2 $dbenv] TRUE
+ set l2 [lindex [lindex [$logc get -last] 0] 0]
+ lappend lsnlist $l2
+ set tdb2 [eval {berkdb_open -create -mode 0644} \
+ -env $dbenv -txn $t2 -btree tdb2.db]
+ error_check_good dbopen [is_valid_db $tdb2] TRUE
+
+ set t3 [$dbenv txn]
+ error_check_good t3:txn_begin [is_valid_txn $t3 $dbenv] TRUE
+ set l3 [lindex [lindex [$logc get -last] 0] 0]
+ lappend lsnlist $l3
+ set tdb3 [eval {berkdb_open -create -mode 0644} \
+ -env $dbenv -txn $t3 -btree tdb3.db]
+ error_check_good dbopen [is_valid_db $tdb3] TRUE
+
+ # Keep a list of active transactions and databases opened
+ # within those transactions.
+ set txnlist [list "$t1 $tdb1" "$t2 $tdb2" "$t3 $tdb3"]
+
+ # Loop through a large number of log records, checkpointing
+ # and checking db_archive periodically.
+ for { set i 1 } { $i <= $nrecs } { incr i } {
+ set rec $i:$baserec
+ set lsn [$dbenv log_put $rec]
+ error_check_bad log_put [llength $lsn] 0
+ if { [expr $i % $checkrec] == 0 } {
+
+ # Take a checkpoint
+ $dbenv txn_checkpoint
+ set ckp_file [lindex [lindex [$logc get -last] 0] 0]
+ catch { archive_command -h $testdir -a } res_log_full
+ if { [string first db_archive $res_log_full] == 0 } {
+ set res_log_full ""
+ }
+ catch { archive_command -h $testdir } res_log
+ if { [string first db_archive $res_log] == 0 } {
+ set res_log ""
+ }
+ catch { archive_command -h $testdir -l } res_alllog
+ catch { archive_command -h $testdir -a -s } \
+ res_data_full
+ catch { archive_command -h $testdir -s } res_data
+
+ if { $inmem == 0 } {
+ error_check_good nlogfiles [llength $res_alllog] \
+ [lindex [lindex [$logc get -last] 0] 0]
+ } else {
+ error_check_good nlogfiles [llength $res_alllog] 0
+ }
+
+ error_check_good logs_match [llength $res_log_full] \
+ [llength $res_log]
+ error_check_good data_match [llength $res_data_full] \
+ [llength $res_data]
+
+ # Check right number of log files
+ if { $inmem == 0 } {
+ set expected [min $ckp_file [expr [lindex $lsnlist 0] - 1]]
+ error_check_good nlogs [llength $res_log] $expected
+ }
+
+ # Check that the relative names are a subset of the
+ # full names
+ set n 0
+ foreach x $res_log {
+ error_check_bad log_name_match:$res_log \
+ [string first $x \
+ [lindex $res_log_full $n]] -1
+ incr n
+ }
+
+ set n 0
+ foreach x $res_data {
+ error_check_bad log_name_match:$res_data \
+ [string first $x \
+ [lindex $res_data_full $n]] -1
+ incr n
+ }
+
+ # Commit a transaction and close the associated db.
+ set t [lindex [lindex $txnlist 0] 0]
+ set tdb [lindex [lindex $txnlist 0] 1]
+ if { [string length $t] != 0 } {
+ error_check_good txn_commit:$t [$t commit] 0
+ error_check_good tdb_close:$tdb [$tdb close] 0
+ set txnlist [lrange $txnlist 1 end]
+ set lsnlist [lrange $lsnlist 1 end]
+ }
+
+ # If we're down to no transactions, start some new ones.
+ if { [llength $txnlist] == 0 } {
+ set t1 [$dbenv txn]
+ error_check_bad tx_begin $t1 NULL
+ error_check_good \
+ tx_begin [is_substr $t1 $dbenv] 1
+ set tdb1 [eval {berkdb_open -create -mode 0644} \
+ -env $dbenv -txn $t1 -btree tdb1.db]
+ error_check_good dbopen [is_valid_db $tdb1] TRUE
+ set l1 [lindex [lindex [$logc get -last] 0] 0]
+ lappend lsnlist $l1
+
+ set t2 [$dbenv txn]
+ error_check_bad tx_begin $t2 NULL
+ error_check_good \
+ tx_begin [is_substr $t2 $dbenv] 1
+ set tdb2 [eval {berkdb_open -create -mode 0644} \
+ -env $dbenv -txn $t2 -btree tdb2.db]
+ error_check_good dbopen [is_valid_db $tdb2] TRUE
+ set l2 [lindex [lindex [$logc get -last] 0] 0]
+ lappend lsnlist $l2
+
+ set t3 [$dbenv txn]
+ error_check_bad tx_begin $t3 NULL
+ error_check_good \
+ tx_begin [is_substr $t3 $dbenv] 1
+ set tdb3 [eval {berkdb_open -create -mode 0644} \
+ -env $dbenv -txn $t3 -btree tdb3.db]
+ error_check_good dbopen [is_valid_db $tdb3] TRUE
+ set l3 [lindex [lindex [$logc get -last] 0] 0]
+ lappend lsnlist $l3
+
+ set txnlist [list "$t1 $tdb1" "$t2 $tdb2" "$t3 $tdb3"]
+ }
+ }
+ }
+ # Commit any transactions still running.
+ puts "\tArchive.b: Commit any transactions still running."
+ foreach pair $txnlist {
+ set t [lindex $pair 0]
+ set tdb [lindex $pair 1]
+ error_check_good txn_commit:$t [$t commit] 0
+ error_check_good tdb_close:$tdb [$tdb close] 0
+ }
+
+ # Close and unlink the file
+ error_check_good log_cursor_close [$logc close] 0
+ reset_env $dbenv
+}
+
+proc archive_command { args } {
+ source ./include.tcl
+
+ # Catch a list of files output by db_archive.
+ catch { eval exec $util_path/db_archive $args } output
+
+	# The "|| 1" makes this branch unconditional: filenames are
+	# normalized to forward slashes on every platform, not only Windows.
+	if { $is_windows_test == 1 || 1 } {
+		# Convert all filenames to use forward slashes.
+ regsub -all {[\\]} $output / output
+ }
+
+ # Output the [possibly-transformed] list.
+ return $output
+}
+
+proc min { a b } {
+ if {$a < $b} {
+ return $a
+ } else {
+ return $b
+ }
+}
diff --git a/db-4.8.30/test/backup.tcl b/db-4.8.30/test/backup.tcl
new file mode 100644
index 0000000..030a5b7
--- /dev/null
+++ b/db-4.8.30/test/backup.tcl
@@ -0,0 +1,225 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST backup
+# TEST Test of hotbackup functionality.
+# TEST
+# TEST	Do all of the following tests with and without
+# TEST the -c (checkpoint) option. Make sure that -c and
+# TEST -d (data_dir) are not allowed together.
+# TEST
+# TEST (1) Test that plain and simple hotbackup works.
+# TEST (2) Test with -data_dir (-d).
+# TEST (3) Test updating an existing hot backup (-u).
+# TEST (4) Test with absolute path.
+# TEST (5) Test with DB_CONFIG (-D), setting log_dir (-l)
+# TEST and data_dir (-d).
+# TEST (6) DB_CONFIG and update.
+# TEST (7) Repeat hot backup (non-update) with DB_CONFIG and
+# TEST existing directories.
+
+proc backup { {nentries 1000} } {
+ source ./include.tcl
+ global util_path
+
+ set omethod "-btree"
+ set testfile "foo.db"
+ set backupdir "backup"
+
+ # Set up small logs so we quickly create more than one.
+ set log_size 20000
+ set env_flags " -create -txn -home $testdir -log_max $log_size"
+ set db_flags " -create $omethod -auto_commit $testfile "
+
+ foreach option { checkpoint nocheckpoint } {
+ if { $option == "checkpoint" } {
+ set c "c"
+ set msg "with checkpoint"
+ } else {
+ set c ""
+ set msg "without checkpoint"
+ }
+ puts "Backuptest $msg."
+
+ env_cleanup $testdir
+ env_cleanup $backupdir
+
+ set env [eval {berkdb_env} $env_flags]
+ set db [eval {berkdb_open} -env $env $db_flags]
+ set txn [$env txn]
+ populate $db $omethod $txn $nentries 0 0
+ $txn commit
+
+ # Backup directory is empty before hot backup.
+ set files [glob -nocomplain $backupdir/*]
+ error_check_good no_files [llength $files] 0
+
+ puts "\tBackuptest.a: Hot backup to directory $backupdir."
+ if {[catch { eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b $backupdir } res ] } {
+ error "FAIL: $res"
+ }
+
+ set logfiles [glob $backupdir/log*]
+ error_check_bad found_logs [llength $logfiles] 0
+ error_check_good found_db [file exists $backupdir/$testfile] 1
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+
+ puts "\tBackuptest.b: Hot backup with data_dir."
+ file mkdir $testdir/data1
+ error_check_good db_data_dir\
+ [file exists $testdir/data1/$testfile] 0
+
+ # Create a new env with data_dir.
+ set env [eval {berkdb_env_noerr} $env_flags -data_dir data1]
+ set db [eval {berkdb_open} -env $env $db_flags]
+ set txn [$env txn]
+ populate $db $omethod $txn $nentries 0 0
+ $txn commit
+
+ # Check that data went into data_dir.
+ error_check_good db_data_dir\
+ [file exists $testdir/data1/$testfile] 1
+
+ # You may not specify both -d (data_dir) and -c (checkpoint).
+ set msg2 "cannot specify -d and -c"
+ if { $option == "checkpoint" } {
+ catch {eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b $backupdir\
+ -d $testdir/data1} res
+ error_check_good c_and_d [is_substr $res $msg2] 1
+ } else {
+ if {[catch {eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b $backupdir\
+ -d $testdir/data1} res] } {
+ error "FAIL: $res"
+ }
+ # Check that logs and db are in backupdir.
+ error_check_good db_backup\
+ [file exists $backupdir/$testfile] 1
+ set logfiles [glob $backupdir/log*]
+ error_check_bad logs_backed_up [llength $logfiles] 0
+ }
+
+ # Add more data and try the "update" flag.
+ puts "\tBackuptest.c: Update existing hot backup."
+ set txn [$env txn]
+ populate $db $omethod $txn [expr $nentries * 2] 0 0
+ $txn commit
+
+ if { $option == "checkpoint" } {
+ catch {eval exec $util_path/db_hotbackup\
+ -${c}vuh $testdir -b backup -d $testdir/data1} res
+ error_check_good c_and_d [is_substr $res $msg2] 1
+ } else {
+ if {[catch {eval exec $util_path/db_hotbackup\
+ -${c}vuh $testdir -b backup\
+ -d $testdir/data1} res] } {
+ error "FAIL: $res"
+ }
+ # There should be more log files now.
+ set newlogfiles [glob $backupdir/log*]
+ error_check_bad more_logs $newlogfiles $logfiles
+ }
+
+ puts "\tBackuptest.d: Hot backup with full path."
+ set fullpath [pwd]
+ if { $option == "checkpoint" } {
+ catch {eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b backup\
+ -d $fullpath/$testdir/data1} res
+ error_check_good c_and_d [is_substr $res $msg2] 1
+ } else {
+ if {[catch {eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b backup\
+ -d $fullpath/$testdir/data1} res] } {
+ error "FAIL: $res"
+ }
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+ env_cleanup $backupdir
+
+ puts "\tBackuptest.e: Hot backup with DB_CONFIG."
+ backuptest_makeconfig
+
+ set env [eval {berkdb_env_noerr} $env_flags]
+ set db [eval {berkdb_open} -env $env $db_flags]
+ set txn [$env txn]
+ populate $db $omethod $txn $nentries 0 0
+ $txn commit
+
+ if { $option == "checkpoint" } {
+ catch {eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b $backupdir -l $testdir/logs\
+ -d $testdir/data1} res
+ error_check_good c_and_d [is_substr $res $msg2] 1
+ } else {
+ if {[catch {eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b $backupdir -l $testdir/logs\
+ -d $testdir/data1} res] } {
+ error "FAIL: $res"
+ }
+ # Check that logs and db are in backupdir.
+ error_check_good db_backup\
+ [file exists $backupdir/$testfile] 1
+ set logfiles [glob $backupdir/log*]
+ error_check_bad logs_backed_up [llength $logfiles] 0
+ }
+
+ set txn [$env txn]
+ populate $db $omethod $txn [expr $nentries * 2] 0 0
+ $txn commit
+
+ puts "\tBackuptest.f:\
+ Hot backup update with DB_CONFIG."
+ if { $option == "checkpoint" } {
+ catch {eval exec $util_path/db_hotbackup\
+ -${c}vuh $testdir -b backup -l $testdir/logs\
+ -d $testdir/data1} res
+ error_check_good c_and_d [is_substr $res $msg2] 1
+ } else {
+ if {[catch {eval exec $util_path/db_hotbackup\
+ -${c}vuh $testdir -b backup -l $testdir/logs\
+ -d $testdir/data1} res] } {
+ error "FAIL: $res"
+ }
+ # There should be more log files now.
+ set newlogfiles [glob $backupdir/log*]
+ error_check_bad more_logs $newlogfiles $logfiles
+ }
+
+ # Repeat with directories already there to test cleaning.
+ # We are not doing an update this time.
+ puts "\tBackuptest.g:\
+ Hot backup with DB_CONFIG (non-update)."
+ if { [catch { eval exec $util_path/db_hotbackup\
+ -${c}vh $testdir -b $backupdir -D } res] } {
+ error "FAIL: $res"
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ }
+}
+
+proc backuptest_makeconfig { } {
+ source ./include.tcl
+
+ file mkdir $testdir/logs
+ file mkdir $testdir/data1
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_lg_dir logs"
+ puts $cid "set_data_dir data1"
+ close $cid
+}
+
diff --git a/db-4.8.30/test/bigfile001.tcl b/db-4.8.30/test/bigfile001.tcl
new file mode 100644
index 0000000..05763d3
--- /dev/null
+++ b/db-4.8.30/test/bigfile001.tcl
@@ -0,0 +1,79 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST bigfile001
+# TEST Create a database greater than 4 GB in size. Close, verify.
+# TEST Grow the database somewhat. Close, reverify. Lather, rinse,
+# TEST repeat. Since it will not work on all systems, this test is
+# TEST not run by default.
+proc bigfile001 { { itemsize 4096 } \
+ { nitems 1048576 } { growby 5000 } { growtms 2 } args } {
+ source ./include.tcl
+
+ set method "btree"
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ global is_fat32
+ if { $is_fat32 } {
+ puts "Skipping bigfile001 for FAT32 file system."
+ return
+ }
+ puts "Bigfile001: $method ($args) $nitems * $itemsize bytes of data"
+
+ env_cleanup $testdir
+
+ # Create the database. Use 64K pages; we want a good fill
+ # factor, and page size doesn't matter much. Use a 50MB
+ # cache; that should be manageable, and will help
+ # performance.
+ set dbname $testdir/big.db
+
+ set db [eval {berkdb_open -create} {-pagesize 65536 \
+ -cachesize {0 50000000 0}} $omethod $args $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tBigfile001.a: Creating database..."
+ flush stdout
+
+ set data [string repeat z $itemsize]
+
+ for { set i 0 } { $i < $nitems } { incr i } {
+ set key key[format %08u $i]
+
+ error_check_good db_put($i) [$db put $key $data] 0
+
+ if { $i % 50000 == 0 } {
+ set pct [expr 100 * $i / $nitems]
+ puts "\tBigfile001.a: $pct%..."
+ flush stdout
+ }
+ }
+ puts "\tBigfile001.a: 100%."
+ error_check_good db_close [$db close] 0
+
+ puts "\tBigfile001.b: Verifying database..."
+ error_check_good verify \
+ [verify_dir $testdir "\t\t" 0 0 1 50000000] 0
+
+ puts "\tBigfile001.c: Grow database $growtms times by $growby items"
+
+ for { set j 0 } { $j < $growtms } { incr j } {
+ set db [eval {berkdb_open} {-cachesize {0 50000000 0}} $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+ puts -nonewline "\t\tBigfile001.c.1: Adding $growby items..."
+ flush stdout
+ for { set i 0 } { $i < $growby } { incr i } {
+ set key key[format %08u $i].$j
+ error_check_good db_put($j.$i) [$db put $key $data] 0
+ }
+ error_check_good db_close [$db close] 0
+ puts "done."
+
+ puts "\t\tBigfile001.c.2: Verifying database..."
+ error_check_good verify($j) \
+ [verify_dir $testdir "\t\t\t" 0 0 1 50000000] 0
+ }
+}
diff --git a/db-4.8.30/test/bigfile002.tcl b/db-4.8.30/test/bigfile002.tcl
new file mode 100644
index 0000000..35827fc
--- /dev/null
+++ b/db-4.8.30/test/bigfile002.tcl
@@ -0,0 +1,45 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST bigfile002
+# TEST This one should be faster and not require so much disk space,
+# TEST although it doesn't test as extensively. Create an mpool file
+# TEST with 1K pages. Dirty page 6000000. Sync.
+proc bigfile002 { args } {
+ source ./include.tcl
+ global is_fat32
+ if { $is_fat32 } {
+ puts "Skipping bigfile002 for FAT32 file system."
+ return
+ }
+ puts "Bigfile002: Creating large, sparse file through mpool..."
+ flush stdout
+
+ env_cleanup $testdir
+
+ # Create env.
+ set env [berkdb_env -create -home $testdir]
+ error_check_good valid_env [is_valid_env $env] TRUE
+
+ # Create the file.
+ set name big002.file
+ set file [$env mpool -create -pagesize 1024 $name]
+
+ # Dirty page 6000000
+ set pg [$file get -create 6000000]
+ error_check_good pg_init [$pg init A] 0
+ error_check_good pg_set [$pg is_setto A] 1
+
+ # Put page back.
+ error_check_good pg_put [$pg put] 0
+
+ # Fsync.
+ error_check_good fsync [$file fsync] 0
+
+ # Close.
+ error_check_good fclose [$file close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/db-4.8.30/test/byteorder.tcl b/db-4.8.30/test/byteorder.tcl
new file mode 100644
index 0000000..73563d0
--- /dev/null
+++ b/db-4.8.30/test/byteorder.tcl
@@ -0,0 +1,33 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Byte Order Test
+# Use existing tests and run with both byte orders.
+proc byteorder { method {nentries 1000} } {
+ source ./include.tcl
+ puts "Byteorder: $method $nentries"
+
+ eval {test001 $method $nentries 0 0 "001" -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test001 $method $nentries 0 0 "001" -lorder 4321}
+ eval {verify_dir $testdir}
+ eval {test003 $method -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test003 $method -lorder 4321}
+ eval {verify_dir $testdir}
+ eval {test010 $method $nentries 5 "010" -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test010 $method $nentries 5 "010" -lorder 4321}
+ eval {verify_dir $testdir}
+ eval {test011 $method $nentries 5 "011" -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test011 $method $nentries 5 "011" -lorder 4321}
+ eval {verify_dir $testdir}
+ eval {test018 $method $nentries -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test018 $method $nentries -lorder 4321}
+ eval {verify_dir $testdir}
+}
diff --git a/db-4.8.30/test/conscript.tcl b/db-4.8.30/test/conscript.tcl
new file mode 100644
index 0000000..1a254e7
--- /dev/null
+++ b/db-4.8.30/test/conscript.tcl
@@ -0,0 +1,123 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Script for DB_CONSUME test (test070.tcl).
+# Usage: conscript dir file runtype nitems outputfile tnum args
+# dir: DBHOME directory
+# file: db file on which to operate
+# runtype: PRODUCE, CONSUME or WAIT -- which am I?
+# nitems: number of items to put or get
+# outputfile: where to log consumer results
+# tnum: test number
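+#
+# A hypothetical command line (the real one is built by test070.tcl;
+# all values here are illustrative only) might look like:
+#	conscript.tcl TESTDIR test070.db CONSUME 500 cons.out 070 { -len 20 -pad 0}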
+
+proc consumescript_produce { db_cmd nitems tnum args } {
+ source ./include.tcl
+ global mydata
+
+ set pid [pid]
+ puts "\tTest$tnum: Producer $pid starting, producing $nitems items."
+
+ set db [eval $db_cmd]
+ error_check_good db_open:$pid [is_valid_db $db] TRUE
+
+ set oret -1
+ set ret 0
+ for { set ndx 0 } { $ndx < $nitems } { incr ndx } {
+ set oret $ret
+ if { 0xffffffff > 0 && $oret > 0x7fffffff } {
+ incr oret [expr 0 - 0x100000000]
+ }
+ set ret [$db put -append [chop_data q $mydata]]
+ error_check_good db_put \
+ [expr $oret > $ret ? \
+ ($oret > 0x7fffffff && $ret < 0x7fffffff) : 1] 1
+
+ }
+
+ set ret [catch {$db close} res]
+ error_check_good db_close:$pid $ret 0
+ puts "\t\tTest$tnum: Producer $pid finished."
+}
+
+proc consumescript_consume { db_cmd nitems tnum outputfile mode args } {
+ source ./include.tcl
+ global mydata
+ set pid [pid]
+ puts "\tTest$tnum: Consumer $pid starting, seeking $nitems items."
+
+ set db [eval $db_cmd]
+ error_check_good db_open:$pid [is_valid_db $db] TRUE
+
+ set oid [open $outputfile a]
+
+ for { set ndx 0 } { $ndx < $nitems } { } {
+ set ret [$db get $mode]
+ if { [llength $ret] > 0 } {
+ error_check_good correct_data:$pid \
+ [lindex [lindex $ret 0] 1] [pad_data q $mydata]
+ set rno [lindex [lindex $ret 0] 0]
+ puts $oid $rno
+ incr ndx
+ } else {
+ # No data to consume; wait.
+ }
+ }
+
+ error_check_good output_close:$pid [close $oid] ""
+
+ set ret [catch {$db close} res]
+ error_check_good db_close:$pid $ret 0
+ puts "\t\tTest$tnum: Consumer $pid finished."
+}
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "conscript.tcl dir file runtype nitems outputfile tnum"
+
+# Verify usage
+if { $argc < 6 } {
+	puts stderr "FAIL:[timestamp] Usage: $usage"
+	exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set file [lindex $argv 1]
+set runtype [lindex $argv 2]
+set nitems [lindex $argv 3]
+set outputfile [lindex $argv 4]
+set tnum [lindex $argv 5]
+# args is the string "{ -len 20 -pad 0}", so we need to extract the
+# " -len 20 -pad 0" part.
+set args [lindex [lrange $argv 6 end] 0]
+
+set mydata "consumer data"
+
+# Open env
+set dbenv [berkdb_env -home $dir ]
+error_check_good db_env_create [is_valid_env $dbenv] TRUE
+
+# Figure out db opening command.
+set db_cmd [concat {berkdb_open -create -mode 0644 -queue -env}\
+ $dbenv $args $file]
+
+# Invoke consumescript_produce or consumescript_consume based on $runtype
+if { $runtype == "PRODUCE" } {
+ # Producers have nothing to log; make sure outputfile is null.
+ error_check_good no_producer_outputfile $outputfile ""
+ consumescript_produce $db_cmd $nitems $tnum $args
+} elseif { $runtype == "CONSUME" } {
+ consumescript_consume $db_cmd $nitems $tnum $outputfile -consume $args
+} elseif { $runtype == "WAIT" } {
+ consumescript_consume $db_cmd $nitems $tnum $outputfile -consume_wait \
+ $args
+} else {
+ error_check_good bad_args $runtype \
+ "either PRODUCE, CONSUME, or WAIT"
+}
+error_check_good env_close [$dbenv close] 0
+exit
diff --git a/db-4.8.30/test/db_reptest.tcl b/db-4.8.30/test/db_reptest.tcl
new file mode 100644
index 0000000..fcd2ec8
--- /dev/null
+++ b/db-4.8.30/test/db_reptest.tcl
@@ -0,0 +1,778 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999,2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST db_reptest
+# TEST Wrapper to configure and run the db_reptest program.
+
+#
+# TODO:
+# late client start.
+# Number of message proc threads.
+#
+
+global last_nsites
+set last_nsites 0
+
+#
+# There are 3 user-level procs that the user may invoke.
+# 1. db_reptest - Runs randomized configurations in a loop.
+# 2. basic_db_reptest - Runs a simple set configuration once,
+# as a smoke test.
+# 3. restore_db_reptest 'dir' - Runs the configuration given in 'dir'
+# in a loop. The purpose is either to reproduce a problem
+# that some configuration encountered, or test a fix.
+#
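+# For example (counts and paths are illustrative only):
+#	% db_reptest 10
+#	% basic_db_reptest 1
+#	% restore_db_reptest /path/to/saved/run 5
+#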
+
+#
+# db_reptest - Run a randomized configuration. Run the test
+# 'count' times in a loop, or loop indefinitely if no count
+# is given.
+#
+proc db_reptest { {count -1} } {
+ global rand_init
+
+ berkdb srand $rand_init
+ set cmd "db_reptest_int random"
+ db_reptest_loop $cmd $count
+}
+
+#
+# Run a basic reptest. The types are:
+# Basic 0 - Two sites, start with site 1 as master, 5 worker threads, btree,
+# run 100 seconds, onesite remote knowledge.
+# Basic 1 - Three sites, all sites start as client, 5 worker threads, btree
+# run 150 seconds, full remote knowledge.
+#
+proc basic_db_reptest { { basic 0 } } {
+ global util_path
+
+ if { [file exists $util_path/db_reptest] == 0 } {
+ puts "Skipping db_reptest. Is it built?"
+ return
+ }
+ if { $basic == 0 } {
+ db_reptest_int basic0
+ }
+ if { $basic == 1 } {
+ db_reptest_int basic1
+ }
+}
+
+#
+# Restore a configuration from the given directory and
+# run that configuration in a loop 'count' times.
+#
+proc restore_db_reptest { restoredir { count -1 } } {
+ set cmd "db_reptest_int restore $restoredir/SAVE_RUN"
+ db_reptest_loop $cmd $count
+}
+
+#
+# Wrapper to run the command in a loop, 'count' times.
+#
+proc db_reptest_loop { cmd count } {
+ global util_path
+
+ if { [file exists $util_path/db_reptest] == 0 } {
+ puts "Skipping db_reptest. Is it built?"
+ return
+ }
+ set iteration 1
+ while { 1 } {
+ puts -nonewline "ITERATION $iteration: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+
+ #
+ eval $cmd
+
+ puts -nonewline "COMPLETED $iteration: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ incr iteration
+ if { $count > 0 && $iteration > $count } {
+ break
+ }
+ }
+}
+
+#
+# Internal version of db_reptest that all user-level procs
+# eventually call. It will configure a single run of
+# db_reptest based on the configuration type specified
+# in 'cfgtype'. This proc will:
+# Configure a run of db_reptest
+# Run db_reptest
+# Verify the sites after db_reptest completes.
+#
+proc db_reptest_int { cfgtype { restoredir NULL } } {
+ source ./include.tcl
+ global rporttype
+
+ env_cleanup $testdir
+
+ set savedir TESTDIR/SAVE_RUN
+ reptest_cleanup $savedir
+
+ #
+ # Get all the default or random values needed for the test
+ # and its args first.
+ #
+ set runtime 0
+ set kill 0
+ #
+ # Get number of sites first because pretty much everything else
+ # after here depends on how many sites there are.
+ #
+ set num_sites [get_nsites $cfgtype $restoredir]
+ set use_lease [get_lease $cfgtype $restoredir]
+ #
+ # Only use kill if we have > 2 sites.
+ # Returns the site number of the site to kill, or 0
+ # if this will not be a kill test.
+ #
+ if { $num_sites > 2 } {
+ set kill [get_kill $cfgtype $restoredir $num_sites]
+ }
+ if { $cfgtype != "restore" } {
+ if { $use_lease } {
+ set use_master 0
+ } else {
+ set use_master [get_usemaster $cfgtype]
+ }
+ set master_site [get_mastersite $cfgtype $use_master $num_sites]
+ set workers [get_workers $cfgtype $use_lease]
+ set dbtype [get_dbtype $cfgtype]
+ set runtime [get_runtime $cfgtype]
+ set use_peers [get_peers $cfgtype]
+ puts -nonewline "Running: $num_sites sites, $runtime seconds "
+ if { $kill } {
+ puts -nonewline "kill site $kill "
+ }
+ if { $use_lease } {
+ puts "with leases"
+ } elseif { $use_master } {
+ puts "master site $master_site"
+ } else {
+ puts "no master"
+ }
+ }
+ set baseport 6100
+ set rporttype NULL
+ #
+ # This loop sets up the args to the invocation of db_reptest
+ # for each site.
+ #
+ for { set i 1 } {$i <= $num_sites } { incr i } {
+ set envdirs($i) TESTDIR/ENV$i
+ reptest_cleanup $envdirs($i)
+ #
+ # If we are restoring the args, just read them from the
+		# saved location for this site.  Otherwise build up
+ # the args for each piece we need.
+ #
+ if { $cfgtype == "restore" } {
+ set cid [open $restoredir/DB_REPTEST_ARGS.$i r]
+ set prog_args($i) [read $cid]
+ close $cid
+ if { $runtime == 0 } {
+ set runtime [parse_runtime $prog_args($i)]
+ puts "Runtime: $runtime"
+ }
+ } else {
+ set prog_args($i) \
+ "-v -c $workers -t $dbtype -T $runtime "
+ set prog_args($i) \
+ [concat $prog_args($i) "-h $envdirs($i)"]
+ #
+ # Add in if this site should kill itself.
+ #
+ if { $kill == $i } {
+ set prog_args($i) [concat $prog_args($i) "-k"]
+ }
+ #
+ # Add in if this site starts as a master or client.
+ #
+ if { $i == $master_site } {
+ set state($i) MASTER
+ set prog_args($i) [concat $prog_args($i) "-M"]
+ } else {
+ set state($i) CLIENT
+ #
+ # If we have a master, then we just want to
+ # start as a client. Otherwise start with
+ # elections.
+ #
+ if { $use_master } {
+ set prog_args($i) \
+ [concat $prog_args($i) "-C"]
+ } else {
+ set prog_args($i) \
+ [concat $prog_args($i) "-E"]
+ }
+ }
+ #
+ # Add in host:port configuration, both this site's
+ # local address and any remote addresses it knows.
+ #
+ set lport($i) [expr $baseport + $i]
+ set prog_args($i) \
+ [concat $prog_args($i) "-l localhost:$lport($i)"]
+ set rport($i) [get_rport $baseport $i \
+ $num_sites $cfgtype]
+ if { $use_peers } {
+ set remote_arg "-R"
+ } else {
+ set remote_arg "-r"
+ }
+ foreach p $rport($i) {
+ set prog_args($i) \
+ [concat $prog_args($i) $remote_arg \
+ "localhost:$p"]
+ }
+ }
+ save_db_reptest $savedir ARGS $i $prog_args($i)
+ }
+
+ # Now make the DB_CONFIG file for each site.
+ reptest_make_config $savedir $num_sites envdirs state \
+ $use_lease $cfgtype $restoredir
+
+ # Run the test
+ run_db_reptest $savedir $num_sites $runtime
+ puts "Test run complete. Verify."
+
+ # Verify the test run.
+ verify_db_reptest $num_sites envdirs $kill
+
+}
+
+#
+# Make a DB_CONFIG file for all sites in the group
+#
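+# A generated DB_CONFIG is a plain text file of "keyword value" lines.
+# For instance, a client site in the 3-site basic1 configuration would
+# get lines such as (values taken from the defaults assembled below):
+#	rep_set_nsites 3
+#	rep_set_request 150000 2400000
+#	rep_set_timeout db_rep_ack_timeout 5000000
+#	set_cachesize 0 536870912 1
+#	set_lg_max 131072
+#	rep_set_priority 20
+#	rep_set_limit 0 100000
+#	repmgr_set_ack_policy db_repmgr_acks_one
+#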
+proc reptest_make_config { savedir nsites edirs st lease cfgtype restoredir } {
+ upvar $edirs envdirs
+ upvar $st state
+
+ #
+ # Generate global config values that should be the same
+ # across all sites, such as number of sites and log size, etc.
+ #
+ set default_cfglist {
+ { "rep_set_nsites" $nsites }
+ { "rep_set_request" "150000 2400000" }
+ { "rep_set_timeout" "db_rep_checkpoint_delay 0" }
+ { "rep_set_timeout" "db_rep_connection_retry 2000000" }
+ { "rep_set_timeout" "db_rep_heartbeat_monitor 1000000" }
+ { "rep_set_timeout" "db_rep_heartbeat_send 500000" }
+ { "set_cachesize" "0 536870912 1" }
+ { "set_lg_max" "131072" }
+ { "set_lk_detect" "db_lock_default" }
+ { "set_verbose" "db_verb_recovery" }
+ { "set_verbose" "db_verb_replication" }
+ }
+
+ set acks { db_repmgr_acks_all db_repmgr_acks_all_peers \
+ db_repmgr_acks_none db_repmgr_acks_one db_repmgr_acks_one_peer \
+ db_repmgr_acks_quorum }
+
+ #
+ # Ack policy must be the same on all sites.
+ #
+ if { $cfgtype == "random" } {
+ if { $lease } {
+ set ackpolicy db_repmgr_acks_quorum
+ } else {
+ set done 0
+ while { $done == 0 } {
+ set acksz [expr [llength $acks] - 1]
+ set myack [berkdb random_int 0 $acksz]
+ set ackpolicy [lindex $acks $myack]
+ #
+ # Only allow the "none" policy with 2 sites
+ # otherwise it can overwhelm the system and
+ # it is a rarely used option.
+ #
+ if { $ackpolicy == "db_repmgr_acks_none" && \
+ $nsites > 2 } {
+ continue
+ }
+ set done 1
+ }
+ }
+ } else {
+ set ackpolicy db_repmgr_acks_one
+ }
+ for { set i 1 } { $i <= $nsites } { incr i } {
+ #
+ # If we're restoring we just need to copy it.
+ #
+ if { $cfgtype == "restore" } {
+ file copy $restoredir/DB_CONFIG.$i \
+ $envdirs($i)/DB_CONFIG
+ file copy $restoredir/DB_CONFIG.$i \
+ $savedir/DB_CONFIG.$i
+ continue
+ }
+ #
+ # Otherwise set up per-site config information
+ #
+ set cfglist $default_cfglist
+
+ #
+ # Add lease configuration if needed. We're running all
+ # locally, so there is no clock skew.
+ #
+ if { $lease } {
+ #
+ # We need to have an ack timeout > lease timeout.
+ # Otherwise txns can get committed without waiting
+ # long enough for leases to get granted.
+ #
+ lappend cfglist { "rep_set_config" "db_rep_conf_lease" }
+ lappend cfglist { "rep_set_timeout" \
+ "db_rep_lease_timeout 10000000" }
+ lappend cfglist \
+ { "rep_set_timeout" "db_rep_ack_timeout 20000000" }
+ } else {
+ lappend cfglist \
+ { "rep_set_timeout" "db_rep_ack_timeout 5000000" }
+ }
+
+ #
+ # Priority
+ #
+ if { $state($i) == "MASTER" } {
+ lappend cfglist { "rep_set_priority" 100 }
+ } else {
+ if { $cfgtype == "random" } {
+ set pri [berkdb random_int 10 25]
+ } else {
+ set pri 20
+ }
+ lappend cfglist { "rep_set_priority" $pri }
+ }
+ #
+ # Others: limit size, bulk, 2site strict,
+ #
+ if { $cfgtype == "random" } {
+ set limit_sz [berkdb random_int 15000 1000000]
+ set bulk [berkdb random_int 0 1]
+ if { $bulk } {
+ lappend cfglist \
+ { "rep_set_config" "db_rep_conf_bulk" }
+ }
+ if { $nsites == 2 } {
+ set strict [berkdb random_int 0 1]
+ if { $strict } {
+ lappend cfglist { "rep_set_config" \
+ "db_repmgr_conf_2site_strict" }
+ }
+ }
+ } else {
+ set limit_sz 100000
+ }
+ lappend cfglist { "rep_set_limit" "0 $limit_sz" }
+ lappend cfglist { "repmgr_set_ack_policy" $ackpolicy }
+ set cid [open $envdirs($i)/DB_CONFIG a]
+ foreach c $cfglist {
+ set carg [subst [lindex $c 0]]
+ set cval [subst [lindex $c 1]]
+ puts $cid "$carg $cval"
+ }
+ close $cid
+ set cid [open $envdirs($i)/DB_CONFIG r]
+ set cfg [read $cid]
+ close $cid
+
+ save_db_reptest $savedir CONFIG $i $cfg
+ }
+
+}
+
+proc reptest_cleanup { dir } {
+ #
+ # For now, just completely remove it all. We might want
+ # to use env_cleanup at some point in the future.
+ #
+ fileremove -f $dir
+ file mkdir $dir
+}
+
+
+proc save_db_reptest { savedir op site savelist } {
+ #
+ # Save a copy of the configuration and args used to run this
+ # instance of the test.
+ #
+ if { $op == "CONFIG" } {
+ set outfile $savedir/DB_CONFIG.$site
+ } else {
+ set outfile $savedir/DB_REPTEST_ARGS.$site
+ }
+ set cid [open $outfile a]
+ puts -nonewline $cid $savelist
+ close $cid
+}
+
+proc run_db_reptest { savedir numsites runtime } {
+ source ./include.tcl
+ global killed_procs
+
+ set pids {}
+ for {set i 1} {$i <= $numsites} {incr i} {
+ lappend pids [exec $tclsh_path $test_path/wrap_reptest.tcl \
+ $savedir/DB_REPTEST_ARGS.$i $savedir/site$i.log &]
+ tclsleep 1
+ }
+ watch_procs $pids 15 [expr $runtime * 3]
+ set killed [llength $killed_procs]
+ if { $killed > 0 } {
+ error "Processes $killed_procs never finished"
+ }
+}
+
+proc verify_db_reptest { num_sites edirs kill } {
+ upvar $edirs envdirs
+
+ set startenv 1
+ set cmpeid 2
+ if { $kill == 1 } {
+ set startenv 2
+ set cmpeid 3
+ }
+ set envbase [berkdb_env_noerr -home $envdirs($startenv)]
+ for { set i $cmpeid } { $i <= $num_sites } { incr i } {
+ if { $i == $kill } {
+ continue
+ }
+ set cmpenv [berkdb_env_noerr -home $envdirs($i)]
+ puts "Compare $envdirs($startenv) with $envdirs($i)"
+ #
+		# Compare 2 envs.  We assume the database that db_reptest
+		# creates is named 'am1.db'.
+ # We want as other args:
+ # 0 - compare_shared_portion
+ # 1 - match databases
+ # 0 - don't compare logs (for now)
+ rep_verify $envdirs($startenv) $envbase $envdirs($i) $cmpenv \
+ 0 1 0 am1.db
+ $cmpenv close
+ }
+ $envbase close
+}
+
+proc get_nsites { cfgtype restoredir } {
+ global last_nsites
+
+ #
+ # The number of sites must be the same for all. Read the
+ # first site's saved DB_CONFIG file if we're restoring since
+ # we only know we have at least 1 site.
+ #
+ if { $cfgtype == "restore" } {
+ set cid [open $restoredir/DB_CONFIG.1 r]
+ while { [gets $cid cfglist] } {
+ puts "Read in: $cfglist"
+ set cfg [lindex $cfglist 0]
+ if { $cfg == "rep_set_nsites" } {
+ set num_sites [lindex $cfglist 1]
+ break;
+ }
+ }
+ close $cid
+ return $num_sites
+ }
+ if { $cfgtype == "random" } {
+ #
+		# Sometimes 'random' doesn't seem to do a good job.  We have
+		# seen runs where, on every iteration after the first, nsites
+		# was always 2.  Add this check to make sure the nsites value
+		# differs from the one used in the last iteration.
+ #
+ set n [berkdb random_int 2 5]
+ while { $n == $last_nsites } {
+ set n [berkdb random_int 2 5]
+puts "Getting random nsites between 2 and 5. Got $n, last_nsites $last_nsites"
+ }
+ set last_nsites $n
+ return $n
+# return [berkdb random_int 2 5]
+ }
+ if { $cfgtype == "basic0" } {
+ return 2
+ }
+ if { $cfgtype == "basic1" } {
+ return 3
+ }
+ return -1
+}
+
+#
+# Run with master leases? 25%/75% (use a master lease 25% of the time).
+#
+proc get_lease { cfgtype restoredir } {
+ #
+	# The lease setting must be the same for all sites.  Read the
+	# first site's saved DB_CONFIG file if we're restoring since
+	# we only know we have at least 1 site.
+ #
+ if { $cfgtype == "restore" } {
+ set use_lease 0
+ set cid [open $restoredir/DB_CONFIG.1 r]
+ while { [gets $cid cfglist] } {
+# puts "Read in: $cfglist"
+ if { [llength $cfglist] == 0 } {
+ break;
+ }
+ set cfg [lindex $cfglist 0]
+ if { $cfg == "rep_set_config" } {
+ set lease [lindex $cfglist 1]
+ if { $lease == "db_rep_conf_lease" } {
+ set use_lease 1
+ break;
+ }
+ }
+ }
+ close $cid
+ return $use_lease
+ }
+ if { $cfgtype == "random" } {
+ set leases { 1 0 0 0 }
+ set len [expr [llength $leases] - 1]
+ set i [berkdb random_int 0 $len]
+ return [lindex $leases $i]
+ }
+ if { $cfgtype == "basic0" } {
+ return 0
+ }
+ if { $cfgtype == "basic1" } {
+ return 0
+ }
+}
+
+#
+# Do a kill test about half the time. We randomly choose a
+# site number to kill; it could be a master or a client.
+# Return 0 if we don't kill any site.
+#
+proc get_kill { cfgtype restoredir num_sites } {
+ if { $cfgtype == "restore" } {
+ set ksite 0
+ for { set i 1 } { $i <= $num_sites } { incr i } {
+ set cid [open $restoredir/DB_REPTEST_ARGS.$i r]
+ #
+ # !!!
+ # We currently assume the args file is 1 line.
+ # We assume only 1 site can get killed. So, if we
+ # find one, we break the loop and don't look further.
+ #
+ gets $cid arglist
+ close $cid
+# puts "Read in: $arglist"
+ set dokill [lsearch $arglist "-k"]
+ if { $dokill != -1 } {
+ set ksite $i
+ break
+ }
+ }
+ return $ksite
+ }
+ if { $cfgtype == "random" } {
+ set k { 0 0 0 1 1 1 0 1 1 0 }
+ set len [expr [llength $k] - 1]
+ set i [berkdb random_int 0 $len]
+ if { [lindex $k $i] == 1 } {
+ set ksite [berkdb random_int 1 $num_sites]
+ } else {
+ set ksite 0
+ }
+ return $ksite
+ }
+ if { $cfgtype == "basic0" || $cfgtype == "basic1" } {
+ return 0
+ } else {
+ error "Get_kill: Invalid config type $cfgtype"
+ }
+}
+
+#
+# Use peers or only the master for requests? 25%/75% (use a peer 25%
+# of the time and master 75%)
+#
+proc get_peers { cfgtype } {
+ if { $cfgtype == "random" } {
+ set peer { 0 0 0 1 }
+ set len [expr [llength $peer] - 1]
+ set i [berkdb random_int 0 $len]
+ return [lindex $peer $i]
+ }
+ if { $cfgtype == "basic0" || $cfgtype == "basic1" } {
+ return 0
+ }
+}
+
+#
+# Start with a master or all clients? 25%/75% (use a master 25%
+# of the time and have all clients 75%)
+#
+proc get_usemaster { cfgtype } {
+ if { $cfgtype == "random" } {
+ set mst { 1 0 0 0 }
+ set len [expr [llength $mst] - 1]
+ set i [berkdb random_int 0 $len]
+ return [lindex $mst $i]
+ }
+ if { $cfgtype == "basic0" } {
+ return 1
+ }
+ if { $cfgtype == "basic1" } {
+ return 0
+ }
+}
+
+#
+# If we use a master, which site? This proc will return
+# the site number of the mastersite, or it will return
+# 0 if no site should start as master. Sites are numbered
+# starting at 1.
+#
+proc get_mastersite { cfgtype usemaster nsites } {
+ if { $usemaster == 0 } {
+ return 0
+ }
+ if { $cfgtype == "random" } {
+ return [berkdb random_int 1 $nsites]
+ }
+ if { $cfgtype == "basic0" } {
+ return 1
+ }
+ if { $cfgtype == "basic1" } {
+ return 0
+ }
+}
+
+#
+# This is the number of worker threads performing the workload.
+# This is not the number of message processing threads.
+#
+# Scale back the number of worker threads if leases are in use.
+# The timing with leases can be fairly sensitive and since all sites
+# run on the local machine, too many workers on every site can
+# overwhelm the system, causing lost messages and delays that make
+# the tests fail. Rather than try to tweak timeouts, just reduce
+# the workloads a bit.
+#
+proc get_workers { cfgtype lease } {
+ if { $cfgtype == "random" } {
+ if { $lease } {
+ return [berkdb random_int 2 4]
+ } else {
+ return [berkdb random_int 2 8]
+ }
+ }
+ if { $cfgtype == "basic0" || $cfgtype == "basic1" } {
+ return 5
+ }
+}
+
+proc get_dbtype { cfgtype } {
+ if { $cfgtype == "random" } {
+ #
+		# Intended mix: 50% btree, 25% queue, 12.5% hash, 12.5% recno.
+		# We favor queue only because there is special handling
+		# for queue in internal init.  Queue is currently disabled,
+		# so the active list below uses only btree, hash and recno.
+ #
+# set methods {btree btree btree btree queue queue hash recno}
+ set methods {btree btree btree btree hash recno}
+ set len [expr [llength $methods] - 1]
+ set i [berkdb random_int 0 $len]
+ return [lindex $methods $i]
+ }
+ if { $cfgtype == "basic0" || $cfgtype == "basic1" } {
+ return btree
+ }
+}
+
+proc get_runtime { cfgtype } {
+ if { $cfgtype == "random" } {
+ return [berkdb random_int 100 500]
+ }
+ if { $cfgtype == "basic0" } {
+ return 100
+ }
+ if { $cfgtype == "basic1" } {
+ return 150
+ }
+}
+
+proc get_rport { baseport i num_sites cfgtype} {
+ global rporttype
+
+ if { $cfgtype == "random" && $rporttype == "NULL" } {
+ #
+ # The circular comm choices seem problematic.
+ # Remove them for now.
+ #
+# set types {backcirc forwcirc full onesite}
+ set types {full onesite}
+ set len [expr [llength $types] - 1]
+ set rindex [berkdb random_int 0 $len]
+ set rporttype [lindex $types $rindex]
+ }
+ if { $cfgtype == "basic0" } {
+ set rporttype onesite
+ }
+ if { $cfgtype == "basic1" } {
+ set rporttype full
+ }
+ #
+ # This produces a circular knowledge ring. Either forward
+ # or backward. In the forwcirc, ENV1 knows (via -r) about
+ # ENV2, ENV2 knows about ENV3, ..., ENVX knows about ENV1.
+ #
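+	# For example, with baseport 6100 and 3 sites (each site listening
+	# on baseport + its site number), forwcirc gives site 1 -> 6102,
+	# site 2 -> 6103 and site 3 -> 6101.
+	#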
+ if { $rporttype == "forwcirc" } {
+ if { $i != $num_sites } {
+ return [list [expr $baseport + $i + 1]]
+ } else {
+ return [list [expr $baseport + 1]]
+ }
+ }
+ if { $rporttype == "backcirc" } {
+ if { $i != 1 } {
+ return [list [expr $baseport + $i - 1]]
+ } else {
+ return [list [expr $baseport + $num_sites]]
+ }
+ }
+ #
+ # This produces a configuration where site 1 does not know
+ # about any other site and every other site knows about site 1.
+ #
+ if { $rporttype == "onesite" } {
+ if { $i == 1 } {
+ return {}
+ } else {
+ return [list [expr $baseport + 1]]
+ }
+ }
+ #
+ # This produces a fully connected configuration
+ #
+ if { $rporttype == "full" } {
+ set rlist {}
+ for { set site 1 } { $site <= $num_sites } { incr site } {
+ if { $site != $i } {
+ lappend rlist [expr $baseport + $site]
+ }
+ }
+ return $rlist
+ }
+}
+
+proc parse_runtime { progargs } {
+ set i [lsearch $progargs "-T"]
+ set val [lindex $progargs [expr $i + 1]]
+ return $val
+}
diff --git a/db-4.8.30/test/dbm.tcl b/db-4.8.30/test/dbm.tcl
new file mode 100644
index 0000000..ff43a82
--- /dev/null
+++ b/db-4.8.30/test/dbm.tcl
@@ -0,0 +1,127 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST dbm
+# TEST Historic DBM interface test. Use the first 1000 entries from the
+# TEST dictionary. Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Then reopen the file, re-retrieve everything. Finally, delete
+# TEST everything.
+proc dbm { { nentries 1000 } } {
+ source ./include.tcl
+
+ puts "DBM interfaces test: $nentries"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/dbmtest
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ error_check_good dbminit [berkdb dbminit $testfile] 0
+ set did [open $dict]
+
+ set flags ""
+ set txn ""
+ set count 0
+ set skippednullkey 0
+
+ puts "\tDBM.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # DBM can't handle zero-length keys
+ if { [string length $str] == 0 } {
+ set skippednullkey 1
+ continue
+ }
+
+ set ret [berkdb store $str $str]
+ error_check_good dbm_store $ret 0
+
+ set d [berkdb fetch $str]
+ error_check_good dbm_fetch $d $str
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tDBM.b: dump file"
+ set oid [open $t1 w]
+ for { set key [berkdb firstkey] } { $key != -1 } {\
+ set key [berkdb nextkey $key] } {
+ puts $oid $key
+ set d [berkdb fetch $key]
+ error_check_good dbm_refetch $d $key
+ }
+
+ # If we had to skip a zero-length key, juggle things to cover up
+ # this fact in the dump.
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ incr nentries 1
+ }
+
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good DBM:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tDBM.c: close, open, and dump file"
+
+ # Now, reopen the file and run the last test again.
+ error_check_good dbminit2 [berkdb dbminit $testfile] 0
+ set oid [open $t1 w]
+
+ for { set key [berkdb firstkey] } { $key != -1 } {\
+ set key [berkdb nextkey $key] } {
+ puts $oid $key
+ set d [berkdb fetch $key]
+ error_check_good dbm_refetch $d $key
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good DBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and delete each entry
+ puts "\tDBM.d: sequential scan and delete"
+
+ error_check_good dbminit3 [berkdb dbminit $testfile] 0
+ set oid [open $t1 w]
+
+ for { set key [berkdb firstkey] } { $key != -1 } {\
+ set key [berkdb nextkey $key] } {
+ puts $oid $key
+ set ret [berkdb delete $key]
+ error_check_good dbm_delete $ret 0
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good DBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ error_check_good "dbm_close" [berkdb dbmclose] 0
+}
diff --git a/db-4.8.30/test/dbscript.tcl b/db-4.8.30/test/dbscript.tcl
new file mode 100644
index 0000000..8906b3f
--- /dev/null
+++ b/db-4.8.30/test/dbscript.tcl
@@ -0,0 +1,358 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Random db tester.
+# Usage: dbscript method file numops ncurs min_del max_add key_avg \
+#	data_avg dups errpct args
+# method: method (we pass this in so that fixed-length records work)
+# file: db file on which to operate
+# numops: number of operations to do
+# ncurs: number of cursors
+# min_del: minimum number of keys before you disable deletes.
+# max_add: maximum number of keys before you disable adds.
+# key_avg: average key size
+# data_avg: average data size
+# dups: 1 indicates dups allowed, 0 indicates no dups
+# errpct: What percent of operations should generate errors
+# args: additional arguments passed to berkdb_open (method flags, etc.)
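+#
+# A hypothetical command line (all values illustrative only) might be:
+#	dbscript.tcl btree test.db 1000 5 10 1000 25 45 0 10 ""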
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "dbscript file numops ncurs min_del max_add key_avg data_avg dups errpcnt args"
+
+# Verify usage
+if { $argc < 10 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set method [lindex $argv 0]
+set file [lindex $argv 1]
+set numops [ lindex $argv 2 ]
+set ncurs [ lindex $argv 3 ]
+set min_del [ lindex $argv 4 ]
+set max_add [ lindex $argv 5 ]
+set key_avg [ lindex $argv 6 ]
+set data_avg [ lindex $argv 7 ]
+set dups [ lindex $argv 8 ]
+set errpct [ lindex $argv 9 ]
+set args [ lindex $argv 10 ]
+
+berkdb srand $rand_init
+
+puts "Beginning execution for [pid]"
+puts "$file database"
+puts "$numops Operations"
+puts "$ncurs cursors"
+puts "$min_del keys before deletes allowed"
+puts "$max_add or fewer keys to add"
+puts "$key_avg average key length"
+puts "$data_avg average data length"
+puts "$method $args"
+if { $dups != 1 } {
+ puts "No dups"
+} else {
+ puts "Dups allowed"
+}
+puts "$errpct % Errors"
+
+flush stdout
+
+set db [eval {berkdb_open} $args $file]
+set cerr [catch {error_check_good dbopen [is_substr $db db] 1} cret]
+if {$cerr != 0} {
+ puts $cret
+ return
+}
+# set method [$db get_type]
+set record_based [is_record_based $method]
+
+# Initialize globals including data
+global nkeys
+global l_keys
+global a_keys
+
+set nkeys [db_init $db 1]
+puts "Initial number of keys: $nkeys"
+
+set pflags ""
+set gflags ""
+set txn ""
+
+# Open the cursors
+set curslist {}
+for { set i 0 } { $i < $ncurs } { incr i } {
+ set dbc [$db cursor]
+ set cerr [catch {error_check_good dbcopen [is_substr $dbc $db.c] 1} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ set cerr [catch {error_check_bad cursor_create $dbc NULL} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ lappend curslist $dbc
+
+}
+
+# On each iteration we're going to generate random keys and
+# data.  We'll select a get, put, or delete operation, except that
+# delete is not an option when we have fewer than min_del keys and
+# add is not an option when we have more than max_add keys.  The
+# Tcl global array a_keys keeps track of key-data pairs indexed by
+# key, and l_keys is a list of keys accessed by integer index.
+set adds 0
+set puts 0
+set gets 0
+set dels 0
+set bad_adds 0
+set bad_puts 0
+set bad_gets 0
+set bad_dels 0
+
+for { set iter 0 } { $iter < $numops } { incr iter } {
+ set op [pick_op $min_del $max_add $nkeys]
+ set err [is_err $errpct]
+
+ # The op0's indicate that there aren't any duplicates, so we
+ # exercise regular operations. If dups is 1, then we'll use
+ # cursor ops.
+ switch $op$dups$err {
+ add00 {
+ incr adds
+
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ newpair $k [pad_data $method $data]
+ }
+ add01 {
+ incr bad_adds
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ add10 {
+ incr adds
+ set dbcinfo [random_cursor $curslist]
+ set dbc [lindex $dbcinfo 0]
+ if { [berkdb random_int 1 2] == 1 } {
+ # Add a new key
+ set k [random_data $key_avg 1 a_keys \
+ $record_based]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$dbc put} $txn \
+ {-keyfirst $k $data}]
+ newpair $k [pad_data $method $data]
+ } else {
+ # Add a new duplicate
+ set dbc [lindex $dbcinfo 0]
+ set k [lindex $dbcinfo 1]
+ set data [random_data $data_avg 0 0]
+
+ set op [pick_cursput]
+ set data [chop_data $method $data]
+ set ret [eval {$dbc put} $txn {$op $k $data}]
+ adddup $k [lindex $dbcinfo 2] $data
+ }
+ }
+ add11 {
+ # TODO
+ incr bad_adds
+ set ret 1
+ }
+ put00 {
+ incr puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn {$k $data}]
+ changepair $k [pad_data $method $data]
+ }
+ put01 {
+ incr bad_puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ put10 {
+ incr puts
+ set dbcinfo [random_cursor $curslist]
+ set dbc [lindex $dbcinfo 0]
+ set k [lindex $dbcinfo 1]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+
+ set ret [eval {$dbc put} $txn {-current $data}]
+ changedup $k [lindex $dbcinfo 2] $data
+ }
+ put11 {
+ incr bad_puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set dbc [$db cursor]
+ set ret [eval {$dbc put} $txn {-current $data}]
+ set cerr [catch {error_check_good curs_close \
+ [$dbc close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ get00 {
+ incr gets
+ set k [random_key]
+ set val [eval {$db get} $txn {$k}]
+ set data [pad_data $method [lindex [lindex $val 0] 1]]
+ if { $data == $a_keys($k) } {
+ set ret 0
+ } else {
+ set ret "FAIL: Error got |$data| expected |$a_keys($k)|"
+ }
+ # Get command requires no state change
+ }
+ get01 {
+ incr bad_gets
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set ret [eval {$db get} $txn {$k}]
+ # Error case so no change to data state
+ }
+ get10 {
+ incr gets
+ set dbcinfo [random_cursor $curslist]
+ if { [llength $dbcinfo] == 3 } {
+ set ret 0
+		} else {
+ set ret 0
+ }
+ # Get command requires no state change
+ }
+ get11 {
+ incr bad_gets
+ set k [random_key]
+ set dbc [$db cursor]
+ if { [berkdb random_int 1 2] == 1 } {
+ set dir -next
+ } else {
+ set dir -prev
+ }
+		set ret [eval {$dbc get} $txn {$dir $k}]
+ set cerr [catch {error_check_good curs_close \
+ [$dbc close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error and get case so no change to data state
+ }
+ del00 {
+ incr dels
+ set k [random_key]
+ set ret [eval {$db del} $txn {$k}]
+ rempair $k
+ }
+ del01 {
+ incr bad_dels
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set ret [eval {$db del} $txn {$k}]
+ # Error case so no change to data state
+ }
+ del10 {
+ incr dels
+ set dbcinfo [random_cursor $curslist]
+ set dbc [lindex $dbcinfo 0]
+ set ret [eval {$dbc del} $txn]
+		remdup [lindex $dbcinfo 1] [lindex $dbcinfo 2]
+ }
+ del11 {
+ incr bad_dels
+ set c [$db cursor]
+ set ret [eval {$c del} $txn]
+ set cerr [catch {error_check_good curs_close \
+ [$c close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ }
+ if { $err == 1 } {
+ # Verify failure.
+ set cerr [catch {error_check_good $op$dups$err:$k \
+ [is_substr Error $ret] 1} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ } else {
+ # Verify success
+ set cerr [catch {error_check_good $op$dups$err:$k $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ }
+
+ flush stdout
+}
+
+# Close cursors and file
+foreach i $curslist {
+ set r [$i close]
+ set cerr [catch {error_check_good cursor_close:$i $r 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+}
+
+set r [$db close]
+set cerr [catch {error_check_good db_close:$db $r 0} cret]
+if {$cerr != 0} {
+ puts $cret
+ return
+}
+
+puts "[timestamp] [pid] Complete"
+puts "Successful ops: $adds adds $gets gets $puts puts $dels dels"
+puts "Error ops: $bad_adds adds $bad_gets gets $bad_puts puts $bad_dels dels"
+flush stdout
+
+eval filecheck $file {$txn} $args
+
+exit
diff --git a/db-4.8.30/test/ddoyscript.tcl b/db-4.8.30/test/ddoyscript.tcl
new file mode 100644
index 0000000..f17891c
--- /dev/null
+++ b/db-4.8.30/test/ddoyscript.tcl
@@ -0,0 +1,171 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Deadlock detector script tester.
+# Usage: ddoyscript dir lockerid numprocs oldoryoung myid
+# dir: DBHOME directory
+# lockerid: Lock id for this locker
+# numprocs: Total number of processes running
+# myid: id of this process --
+#	the order in which the processes are created is the same
+#	order in which their lockerids were allocated, so we know
+#	that the locker age relationship is isomorphic to the
+#	ordering of the myids.
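+#
+# A hypothetical command line (dead004.tcl builds the real one via
+# wrap.tcl; values here are illustrative only) might be:
+#	ddoyscript.tcl TESTDIR 5 6 o 3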
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "ddoyscript dir lockerid numprocs oldoryoung"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set lockerid [ lindex $argv 1 ]
+set numprocs [ lindex $argv 2 ]
+set old_or_young [lindex $argv 3]
+set myid [lindex $argv 4]
+
+set myenv [berkdb_env -lock -home $dir -create -mode 0644]
+error_check_bad lock_open $myenv NULL
+error_check_good lock_open [is_substr $myenv "env"] 1
+
+# There are two cases here -- oldest/youngest or a ring locker.
+
+if { $myid == 0 || $myid == [expr $numprocs - 1] } {
+ set waitobj NULL
+ set ret 0
+
+ if { $myid == 0 } {
+ set objid 2
+ if { $old_or_young == "o" } {
+ set waitobj [expr $numprocs - 1]
+ }
+ } else {
+ if { $old_or_young == "y" } {
+ set waitobj 0
+ }
+ set objid 4
+ }
+
+ # Acquire own read lock
+ if {[catch {$myenv lock_get read $lockerid $myid} selflock] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good selfget:$objid [is_substr $selflock $myenv] 1
+ }
+
+ # Acquire read lock
+ if {[catch {$myenv lock_get read $lockerid $objid} lock1] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good lockget:$objid [is_substr $lock1 $myenv] 1
+ }
+
+ tclsleep 10
+
+ if { $waitobj == "NULL" } {
+ # Sleep for a good long while
+ tclsleep 90
+ } else {
+ # Acquire write lock
+ if {[catch {$myenv lock_get write $lockerid $waitobj} lock2]
+ != 0} {
+ puts $errorInfo
+ set ret ERROR
+ } else {
+ error_check_good lockget:$waitobj \
+ [is_substr $lock2 $myenv] 1
+
+ # Now release it
+ if {[catch {$lock2 put} err] != 0} {
+ puts $errorInfo
+ set ret ERROR
+ } else {
+ error_check_good lockput:oy:$objid $err 0
+ }
+ }
+
+ }
+
+ # Release self lock
+ if {[catch {$selflock put} err] != 0} {
+ puts $errorInfo
+ if { $ret == 0 } {
+ set ret ERROR
+ }
+ } else {
+ error_check_good selfput:oy:$myid $err 0
+ if { $ret == 0 } {
+ set ret 1
+ }
+ }
+
+ # Release first lock
+ if {[catch {$lock1 put} err] != 0} {
+ puts $errorInfo
+ if { $ret == 0 } {
+ set ret ERROR
+ }
+ } else {
+ error_check_good lockput:oy:$objid $err 0
+ if { $ret == 0 } {
+ set ret 1
+ }
+ }
+
+} else {
+ # Make sure that we succeed if we're locking the same object as
+ # oldest or youngest.
+ if { [expr $myid % 2] == 0 } {
+ set mode read
+ } else {
+ set mode write
+ }
+ # Obtain first lock (should always succeed).
+ if {[catch {$myenv lock_get $mode $lockerid $myid} lock1] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good lockget:$myid [is_substr $lock1 $myenv] 1
+ }
+
+ tclsleep 30
+
+ set nextobj [expr $myid + 1]
+ if { $nextobj == [expr $numprocs - 1] } {
+ set nextobj 1
+ }
+
+ set ret 1
+ if {[catch {$myenv lock_get write $lockerid $nextobj} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ set ret ERROR
+ }
+ } else {
+ error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_bad lockget:$nextobj $lock2 NULL
+ error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+}
+
+puts $ret
+error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
+error_check_good envclose [$myenv close] 0
+exit
diff --git a/db-4.8.30/test/ddscript.tcl b/db-4.8.30/test/ddscript.tcl
new file mode 100644
index 0000000..d154bd6
--- /dev/null
+++ b/db-4.8.30/test/ddscript.tcl
@@ -0,0 +1,43 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Deadlock detector script tester.
+# Usage: ddscript dir test lockerid objid numprocs
+# dir: DBHOME directory
+# test: Which test to run
+# lockerid: Lock id for this locker
+# objid: Object id to lock.
+# numprocs: Total number of processes running
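+#
+# A hypothetical command line (the dead00* tests build the real one via
+# wrap.tcl; values here are illustrative only) might be:
+#	ddscript.tcl TESTDIR ring 5 2 10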
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "ddscript dir test lockerid objid numprocs"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set test [ lindex $argv 1 ]
+set lockerid [ lindex $argv 2 ]
+set objid [ lindex $argv 3 ]
+set numprocs [ lindex $argv 4 ]
+
+set myenv [berkdb_env -lock -home $dir -create -mode 0644 ]
+error_check_bad lock_open $myenv NULL
+error_check_good lock_open [is_substr $myenv "env"] 1
+
+puts [eval $test $myenv $lockerid $objid $numprocs]
+
+error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
+error_check_good envclose [$myenv close] 0
+
+exit
diff --git a/db-4.8.30/test/dead001.tcl b/db-4.8.30/test/dead001.tcl
new file mode 100644
index 0000000..83e92f0
--- /dev/null
+++ b/db-4.8.30/test/dead001.tcl
@@ -0,0 +1,86 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST dead001
+# TEST Use two different configurations to test deadlock detection among a
+# TEST variable number of processes. One configuration has the processes
+# TEST deadlocked in a ring. The other has the processes all deadlocked on
+# TEST a single resource.
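+#
+# For example, dead001 "2" "ring" would run only the two-process ring
+# configuration (a hypothetical narrowing of the defaults below).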
+proc dead001 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 0} {tnum "001"} } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ puts "Dead$tnum: Deadlock detector tests"
+
+ env_cleanup $testdir
+
+ # Create the environment.
+ puts "\tDead$tnum.a: creating environment"
+ set env [berkdb_env -create \
+ -mode 0644 -lock -lock_timeout $timeout -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ foreach t $tests {
+ foreach n $procs {
+ if {$timeout == 0 } {
+ set dpid [exec $util_path/db_deadlock -v -t 0.100000 \
+ -h $testdir >& $testdir/dd.out &]
+ } else {
+ set dpid [exec $util_path/db_deadlock -v -t 0.100000 \
+ -ae -h $testdir >& $testdir/dd.out &]
+ }
+
+ sentinel_init
+ set pidlist ""
+ set ret [$env lock_id_set $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
+
+ # Fire off the tests
+ puts "\tDead$tnum: $n procs of test $t"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead$tnum.log.$i \
+ $testdir $t $locker $i $n"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead$tnum.log.$i \
+ $testdir $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead$tnum.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+ puts "\tDead$tnum: dead check..."
+ dead_check $t $n $timeout $dead $clean $other
+ }
+ }
+
+ # Windows needs files closed before deleting files, so pause a little
+ tclsleep 3
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead$tnum.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+}
diff --git a/db-4.8.30/test/dead002.tcl b/db-4.8.30/test/dead002.tcl
new file mode 100644
index 0000000..9370895
--- /dev/null
+++ b/db-4.8.30/test/dead002.tcl
@@ -0,0 +1,80 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST dead002
+# TEST Same test as dead001, but use "detect on every collision" instead
+# TEST of separate deadlock detector.
+proc dead002 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 0} {tnum 002} } {
+ source ./include.tcl
+
+ puts "Dead$tnum: Deadlock detector tests (detect on every collision)"
+
+ env_cleanup $testdir
+
+ # Create the environment.
+ puts "\tDead$tnum.a: creating environment"
+ set lmode "default"
+ if { $timeout != 0 } {
+ set lmode "expire"
+ }
+ set env [berkdb_env \
+ -create -mode 0644 -home $testdir \
+ -lock -lock_timeout $timeout -lock_detect $lmode]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ foreach t $tests {
+ foreach n $procs {
+ set pidlist ""
+ sentinel_init
+
+ # Fire off the tests
+ puts "\tDead$tnum: $n procs of test $t"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead$tnum.log.$i \
+ $testdir $t $locker $i $n"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead$tnum.log.$i \
+ $testdir $t $locker $i $n &]
+ lappend pidlist $p
+ # If we're running with timeouts, pause so that
+ # locks will have a chance to time out.
+ if { $timeout != 0 } {
+ tclsleep 2
+ }
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead$tnum.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+
+ puts "\tDead$tnum: dead check ..."
+ dead_check $t $n $timeout $dead $clean $other
+ }
+ }
+
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead$tnum.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+}
diff --git a/db-4.8.30/test/dead003.tcl b/db-4.8.30/test/dead003.tcl
new file mode 100644
index 0000000..f8c8c39
--- /dev/null
+++ b/db-4.8.30/test/dead003.tcl
@@ -0,0 +1,98 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST dead003
+# TEST
+# TEST Same test as dead002, but explicitly specify DB_LOCK_OLDEST and
+# TEST DB_LOCK_YOUNGEST. Verify the correct lock was aborted/granted.
+proc dead003 { {procs "2 4 10"} {tests "ring clump"} {tnum "003"} } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set detects { oldest youngest }
+ puts "Dead$tnum: Deadlock detector tests: $detects"
+
+ # Create the environment.
+ foreach d $detects {
+ env_cleanup $testdir
+ puts "\tDead$tnum.a: creating environment for $d"
+ set env [berkdb_env \
+ -create -mode 0644 -home $testdir -lock -lock_detect $d]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ foreach t $tests {
+ foreach n $procs {
+ set pidlist ""
+ sentinel_init
+ set ret [$env lock_id_set \
+ $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
+
+ # Fire off the tests
+ puts "\tDead$tnum: $n procs of test $t"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+				puts "$tclsh_path $test_path/wrap.tcl \
+				    ddscript.tcl $testdir/dead$tnum.log.$i \
+				    $testdir $t $locker $i $n"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl \
+ $testdir/dead$tnum.log.$i $testdir \
+ $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead$tnum.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ puts "\tDead$tnum: dead check..."
+ dead_check $t $n 0 $dead $clean $other
+ #
+ # If we get here we know we have the
+ # correct number of dead/clean procs, as
+ # checked by dead_check above. Now verify
+ # that the right process was the one.
+ puts "\tDead$tnum: Verify $d locks were aborted"
+ set l ""
+ if { $d == "oldest" } {
+ set l [expr $n - 1]
+ }
+ if { $d == "youngest" } {
+ set l 0
+ }
+ set did [open $testdir/dead$tnum.log.$l]
+ while { [gets $did val] != -1 } {
+ error_check_good check_abort \
+ $val 1
+ }
+ close $did
+ }
+ }
+
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead$tnum.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+ }
+}
diff --git a/db-4.8.30/test/dead004.tcl b/db-4.8.30/test/dead004.tcl
new file mode 100644
index 0000000..635bac2
--- /dev/null
+++ b/db-4.8.30/test/dead004.tcl
@@ -0,0 +1,107 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Deadlock Test 4.
+# This test is designed to make sure that we handle youngest and oldest
+# deadlock detection even when the youngest and oldest transactions in the
+# system are not involved in the deadlock (that is, we want to abort the
+# youngest/oldest which is actually involved in the deadlock, not simply
+# the youngest/oldest in the system).
+# Since this is used for transaction systems, the locker ID is what we
+# use to identify age (smaller number is older).
+#
+# The set up is that we have a total of 6 processes. The oldest (locker 0)
+# and the youngest (locker 5) simply acquire a lock, hold it for a long time
+# and then release it.  The rest form a ring, obtaining lock N and requesting
+# lock N+1, wrapping back to lock 1 at the end of the ring.  The deadlock
+# detector ought to pick locker 1 or 4
+# to abort and not 0 or 5.
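+#
+# A sketch of the cycle with the 6 processes:
+#	locker 1 holds lock 1, wants lock 2
+#	locker 2 holds lock 2, wants lock 3
+#	locker 3 holds lock 3, wants lock 4
+#	locker 4 holds lock 4, wants lock 1
+# so only lockers 1 through 4 can be part of the deadlock.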
+
+proc dead004 { {tnum "004"} } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ foreach a { o y } {
+ puts "Dead$tnum: Deadlock detector test -a $a"
+ env_cleanup $testdir
+
+ # Create the environment.
+ puts "\tDead$tnum.a: creating environment"
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ set dpid [exec $util_path/db_deadlock -v -t 5 -a $a \
+ -h $testdir >& $testdir/dd.out &]
+
+ set procs 6
+
+ foreach n $procs {
+
+ sentinel_init
+ set pidlist ""
+ set ret [$env lock_id_set $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
+
+ # Fire off the tests
+ puts "\tDead$tnum: $n procs"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead$tnum.log.$i \
+ ddoyscript.tcl $testdir $locker $n $a $i"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddoyscript.tcl $testdir/dead$tnum.log.$i \
+ $testdir $locker $n $a $i &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ }
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead$tnum.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+
+ puts "\tDead$tnum: dead check..."
+ dead_check oldyoung $n 0 $dead $clean $other
+
+ # Now verify that neither the oldest nor the
+ # youngest were the deadlock.
+ set did [open $testdir/dead$tnum.log.0]
+ error_check_bad file:young [gets $did val] -1
+ error_check_good read:young $val 1
+ close $did
+
+ set did [open $testdir/dead$tnum.log.[expr $procs - 1]]
+ error_check_bad file:old [gets $did val] -1
+ error_check_good read:old $val 1
+ close $did
+
+ # Windows needs files closed before deleting files,
+ # so pause a little
+ tclsleep 2
+ fileremove -f $testdir/dd.out
+
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead$tnum.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+ }
+}
diff --git a/db-4.8.30/test/dead005.tcl b/db-4.8.30/test/dead005.tcl
new file mode 100644
index 0000000..939fe63
--- /dev/null
+++ b/db-4.8.30/test/dead005.tcl
@@ -0,0 +1,88 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Deadlock Test 5.
+# Test out the maxlocks, maxwrites, minlocks, and minwrites options
+# to the deadlock detector.
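+#
+# For example, dead005 "4" "minwrites" would run only the minwrites
+# policy with 4 processes (a hypothetical narrowing of the defaults).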
+proc dead005 { { procs "4 6 10" } \
+ {tests "maxlocks maxwrites minlocks minwrites" } { tnum "005" } } {
+ source ./include.tcl
+
+ foreach t $tests {
+ puts "Dead$tnum.$t: deadlock detection tests"
+ env_cleanup $testdir
+
+ # Create the environment.
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+ case $t {
+ maxlocks { set to m }
+ maxwrites { set to W }
+ minlocks { set to n }
+ minwrites { set to w }
+ }
+ foreach n $procs {
+ set dpid [exec $util_path/db_deadlock -v -t 0.100000 \
+ -h $testdir -a $to >& $testdir/dd.out &]
+ sentinel_init
+ set pidlist ""
+
+ # Fire off the tests
+ puts "\tDead$tnum: $t test with $n procs"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead$tnum.log.$i \
+ ddscript.tcl $testdir $t $locker $i $n"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead$tnum.log.$i \
+ $testdir $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead$tnum.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+ puts "\tDead$tnum: dead check..."
+ dead_check $t $n 0 $dead $clean $other
+ # Now verify that the correct participant
+ # got deadlocked.
+ switch $t {
+ maxlocks {set f [expr $n - 1]}
+ maxwrites {set f 2}
+ minlocks {set f 0}
+ minwrites {set f 1}
+ }
+ set did [open $testdir/dead$tnum.log.$f]
+ error_check_bad file:$t [gets $did val] -1
+ error_check_good read($f):$t $val DEADLOCK
+ close $did
+ }
+ error_check_good lock_env:close [$env close] 0
+ # Windows needs files closed before deleting them, so pause
+ tclsleep 2
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+			fileremove -f $testdir/dead$tnum.log.$i
+ }
+ }
+}
diff --git a/db-4.8.30/test/dead006.tcl b/db-4.8.30/test/dead006.tcl
new file mode 100644
index 0000000..242afe2
--- /dev/null
+++ b/db-4.8.30/test/dead006.tcl
@@ -0,0 +1,15 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST dead006
+# TEST use timeouts rather than the normal dd algorithm.
+proc dead006 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 1000} {tnum 006} } {
+ source ./include.tcl
+
+ dead001 $procs $tests $timeout $tnum
+ dead002 $procs $tests $timeout $tnum
+}
diff --git a/db-4.8.30/test/dead007.tcl b/db-4.8.30/test/dead007.tcl
new file mode 100644
index 0000000..67871bf
--- /dev/null
+++ b/db-4.8.30/test/dead007.tcl
@@ -0,0 +1,35 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST dead007
+# TEST Tests for locker and txn id wraparound.
+proc dead007 { {tnum "007"} } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+ puts "Dead$tnum.a -- wrap around"
+ set lock_curid [expr $lock_maxid - 2]
+ dead001 "2 10" "ring clump" "0" $tnum
+ ## Oldest/youngest breaks when the id wraps
+ # dead003 "4 10"
+ dead004 $tnum
+
+ puts "Dead$tnum.b -- extend space"
+ set lock_maxid [expr $lock_maxid - 3]
+ set lock_curid [expr $lock_maxid - 1]
+ dead001 "4 10" "ring clump" "0" $tnum
+ ## Oldest/youngest breaks when the id wraps
+ # dead003 "10"
+ dead004 $tnum
+
+ set lock_curid $save_curid
+ set lock_maxid $save_maxid
+ # Return the empty string so we don't return lock_maxid.
+ return ""
+}
diff --git a/db-4.8.30/test/env001.tcl b/db-4.8.30/test/env001.tcl
new file mode 100644
index 0000000..29f7757
--- /dev/null
+++ b/db-4.8.30/test/env001.tcl
@@ -0,0 +1,145 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env001
+# TEST Test of env remove interface (formerly env_remove).
+proc env001 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ set testfile $testdir/env.db
+ set t1 $testdir/t1
+
+ puts "Env001: Test of environment remove interface."
+ env_cleanup $testdir
+
+	# Trying to open without the create flag should fail.
+ puts "\tEnv001.a: Open without create (should fail)."
+ catch {set env [berkdb_env_noerr -home $testdir]} ret
+ error_check_good env:fail [is_substr $ret "no such file"] 1
+
+ # Now try opening with create
+ puts "\tEnv001.b: Open with create."
+ set env [berkdb_env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ # Make sure that close works.
+ puts "\tEnv001.c: Verify close."
+ error_check_good env:close:$env [$env close] 0
+
+ # Make sure we can reopen.
+ puts "\tEnv001.d: Remove on closed environments."
+ puts "\t\tEnv001.d.1: Verify re-open."
+ set env [berkdb_env -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ # remove environment
+ puts "\t\tEnv001.d.2: Close environment."
+ error_check_good env:close [$env close] 0
+ puts "\t\tEnv001.d.3: Try remove with force (should succeed)."
+ error_check_good \
+ envremove [berkdb envremove -force -home $testdir] 0
+
+ # HP-UX doesn't allow a second handle on an open env.
+ if { $is_hp_test != 1 } {
+ puts "\tEnv001.e: Remove on open environments."
+ puts "\t\tEnv001.e.1: Env is open by single proc,\
+ remove no force."
+ set env [berkdb_env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set stat [catch {berkdb envremove -home $testdir} ret]
+ error_check_good env:remove $stat 1
+ error_check_good env:close [$env close] 0
+ }
+
+ puts \
+ "\t\tEnv001.e.2: Env is open by single proc, remove with force."
+ if { $is_hp_test != 1 } {
+ set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set stat [catch {berkdb envremove -force -home $testdir} ret]
+ error_check_good env:remove(force) $ret 0
+ #
+ # Even though the underlying env is gone, we need to close
+ # the handle.
+ #
+ set stat [catch {$env close} ret]
+ error_check_bad env:close_after_remove $stat 0
+ error_check_good env:close_after_remove \
+ [is_substr $ret "recovery"] 1
+ }
+
+ puts "\t\tEnv001.e.3: Env is open by 2 procs, remove no force."
+ # should fail
+ set env [berkdb_env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 "berkdb_env_noerr -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+ # First close our env, but leave remote open
+ error_check_good env:close [$env close] 0
+ catch {berkdb envremove -home $testdir} ret
+ error_check_good envremove:2procs:noforce [is_substr $errorCode EBUSY] 1
+ #
+	# Even though the remove failed, the env handle is no longer
+	# valid, so close it in the remote process.
+ set remote_close [send_cmd $f1 "$remote_env close"]
+ error_check_good remote_close $remote_close 0
+
+ # exit remote process
+ set err [catch { close $f1 } result]
+ error_check_good close_remote_process $err 0
+
+ puts "\t\tEnv001.e.4: Env is open by 2 procs, remove with force."
+ if { $is_hp_test != 1 } {
+ set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 "berkdb_env -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ catch {berkdb envremove -force -home $testdir} ret
+ error_check_good envremove:2procs:force $ret 0
+ #
+ # We still need to close our handle.
+ #
+ set stat [catch {$env close} ret]
+ error_check_bad env:close_after_error $stat 0
+ error_check_good env:close_after_error \
+ [is_substr $ret recovery] 1
+
+ # Close down remote process
+ set err [catch { close $f1 } result]
+ error_check_good close_remote_process $err 0
+ }
+
+ # Try opening in a different dir
+ puts "\tEnv001.f: Try opening env in another directory."
+ if { [file exists $testdir/NEWDIR] != 1 } {
+ file mkdir $testdir/NEWDIR
+ }
+ set eflags "-create -home $testdir/NEWDIR -mode 0644"
+ set env [eval {berkdb_env} $eflags]
+ error_check_bad env:open $env NULL
+ error_check_good env:close [$env close] 0
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir/NEWDIR] 0
+
+ puts "\tEnv001 complete."
+}
diff --git a/db-4.8.30/test/env002.tcl b/db-4.8.30/test/env002.tcl
new file mode 100644
index 0000000..20144bf
--- /dev/null
+++ b/db-4.8.30/test/env002.tcl
@@ -0,0 +1,155 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env002
+# TEST Test of DB_LOG_DIR and env name resolution.
+# TEST With an environment path specified using -home, and then again
+# TEST with it specified by the environment variable DB_HOME:
+# TEST 1) Make sure that the set_lg_dir option is respected
+# TEST a) as a relative pathname.
+# TEST b) as an absolute pathname.
+# TEST 2) Make sure that the DB_LOG_DIR db_config argument is respected,
+# TEST again as relative and absolute pathnames.
+# TEST 3) Make sure that if -both- db_config and a file are present,
+# TEST only the file is respected (see doc/env/naming.html).
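+#
+# A minimal sketch of the kind of DB_CONFIG file this test creates
+# in the environment home (the directory name is illustrative only):
+#
+#	set_data_dir .
+#	set_lg_dir logs_in_here
+#
+# Roughly the same request can be made with berkdb_env arguments,
+# where $dir stands in for the environment home:
+#	berkdb_env -create -log -home $dir -log_dir logs_in_here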
+proc env002 { } {
+ # env002 is essentially just a small driver that runs
+ # env002_body--formerly the entire test--twice; once, it
+ # supplies a "home" argument to use with environment opens,
+ # and the second time it sets DB_HOME instead.
+ # Note that env002_body itself calls env002_run_test to run
+ # the body of the actual test and check for the presence
+ # of logs. The nesting, I hope, makes this test's structure simpler.
+
+ global env
+ source ./include.tcl
+
+ puts "Env002: set_lg_dir test."
+
+ puts "\tEnv002: Running with -home argument to berkdb_env."
+ env002_body "-home $testdir"
+
+ puts "\tEnv002: Running with environment variable DB_HOME set."
+ set env(DB_HOME) $testdir
+ env002_body "-use_environ"
+
+ unset env(DB_HOME)
+
+ puts "\tEnv002: Running with both DB_HOME and -home set."
+ # Should respect -only- -home, so we give it a bogus
+ # environment variable setting.
+ set env(DB_HOME) $testdir/bogus_home
+ env002_body "-use_environ -home $testdir"
+ unset env(DB_HOME)
+
+}
+
+proc env002_body { home_arg } {
+ source ./include.tcl
+
+ env_cleanup $testdir
+ set logdir "logs_in_here"
+
+ file mkdir $testdir/$logdir
+
+ # Set up full path to $logdir for when we test absolute paths.
+ set curdir [pwd]
+ cd $testdir/$logdir
+ set fulllogdir [pwd]
+ cd $curdir
+
+ env002_make_config $logdir
+
+ # Run the meat of the test.
+ env002_run_test a 1 "relative path, config file" $home_arg \
+ $testdir/$logdir
+
+ env_cleanup $testdir
+
+ file mkdir $fulllogdir
+ env002_make_config $fulllogdir
+
+ # Run the test again
+ env002_run_test a 2 "absolute path, config file" $home_arg \
+ $fulllogdir
+
+ env_cleanup $testdir
+
+ # Now we try without a config file, but instead with db_config
+ # relative paths
+ file mkdir $testdir/$logdir
+ env002_run_test b 1 "relative path, db_config" "$home_arg \
+ -log_dir $logdir -data_dir ." \
+ $testdir/$logdir
+
+ env_cleanup $testdir
+
+ # absolute
+ file mkdir $fulllogdir
+ env002_run_test b 2 "absolute path, db_config" "$home_arg \
+ -log_dir $fulllogdir -data_dir ." \
+ $fulllogdir
+
+ env_cleanup $testdir
+
+	# Now, set db_config -and- have a DB_CONFIG file, and make
+	# sure only the latter is honored.
+
+ file mkdir $testdir/$logdir
+ env002_make_config $logdir
+
+ # note that we supply a -nonexistent- log dir to db_config
+ env002_run_test c 1 "relative path, both db_config and file" \
+ "$home_arg -log_dir $testdir/bogus \
+ -data_dir ." $testdir/$logdir
+ env_cleanup $testdir
+
+ file mkdir $fulllogdir
+ env002_make_config $fulllogdir
+
+ # note that we supply a -nonexistent- log dir to db_config
+	env002_run_test c 2 "absolute path, both db_config and file" \
+ "$home_arg -log_dir $fulllogdir/bogus \
+ -data_dir ." $fulllogdir
+}
+
+proc env002_run_test { major minor msg env_args log_path} {
+ global testdir
+ set testfile "env002.db"
+
+ puts "\t\tEnv002.$major.$minor: $msg"
+
+ # Create an environment, with logging, and scribble some
+ # stuff in a [btree] database in it.
+ # puts [concat {berkdb_env -create -log -private} $env_args]
+ set dbenv [eval {berkdb_env -create -log -private} $env_args]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+ set db [berkdb_open -env $dbenv -create -btree -mode 0644 $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set key "some_key"
+ set data "some_data"
+
+ error_check_good db_put \
+ [$db put $key [chop_data btree $data]] 0
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # Now make sure the log file is where we want it to be.
+ error_check_good db_exists [file exists $testdir/$testfile] 1
+ error_check_good log_exists \
+ [file exists $log_path/log.0000000001] 1
+}
+
+proc env002_make_config { logdir } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_lg_dir $logdir"
+ close $cid
+}
diff --git a/db-4.8.30/test/env003.tcl b/db-4.8.30/test/env003.tcl
new file mode 100644
index 0000000..175d823
--- /dev/null
+++ b/db-4.8.30/test/env003.tcl
@@ -0,0 +1,148 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env003
+# TEST Test DB_TMP_DIR and env name resolution
+# TEST With an environment path specified using -home, and then again
+# TEST with it specified by the environment variable DB_HOME:
+# TEST 1) Make sure that the DB_TMP_DIR config file option is respected
+# TEST a) as a relative pathname.
+# TEST b) as an absolute pathname.
+# TEST 2) Make sure that the -tmp_dir config option is respected,
+# TEST again as relative and absolute pathnames.
+# TEST 3) Make sure that if -both- -tmp_dir and a file are present,
+# TEST only the file is respected (see doc/env/naming.html).
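+#
+# A minimal sketch of the kind of DB_CONFIG file this test creates
+# (the directory name is illustrative only):
+#
+#	set_data_dir .
+#	set_tmp_dir tmpfiles_in_here
+#
+# The same setting can be passed directly, roughly as:
+#	berkdb_env -create -home $dir -tmp_dir tmpfiles_in_here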
+proc env003 { } {
+ # env003 is essentially just a small driver that runs
+ # env003_body twice. First, it supplies a "home" argument
+ # to use with environment opens, and the second time it sets
+ # DB_HOME instead.
+ # Note that env003_body itself calls env003_run_test to run
+ # the body of the actual test.
+
+ global env
+ source ./include.tcl
+
+ puts "Env003: DB_TMP_DIR test."
+
+ puts "\tEnv003: Running with -home argument to berkdb_env."
+ env003_body "-home $testdir"
+
+ puts "\tEnv003: Running with environment variable DB_HOME set."
+ set env(DB_HOME) $testdir
+ env003_body "-use_environ"
+
+ unset env(DB_HOME)
+
+ puts "\tEnv003: Running with both DB_HOME and -home set."
+ # Should respect -only- -home, so we give it a bogus
+ # environment variable setting.
+ set env(DB_HOME) $testdir/bogus_home
+ env003_body "-use_environ -home $testdir"
+ unset env(DB_HOME)
+}
+
+proc env003_body { home_arg } {
+ source ./include.tcl
+
+ env_cleanup $testdir
+ set tmpdir "tmpfiles_in_here"
+ file mkdir $testdir/$tmpdir
+
+ # Set up full path to $tmpdir for when we test absolute paths.
+ set curdir [pwd]
+ cd $testdir/$tmpdir
+ set fulltmpdir [pwd]
+ cd $curdir
+
+ # Create DB_CONFIG
+ env003_make_config $tmpdir
+
+ # Run the meat of the test.
+ env003_run_test a 1 "relative path, config file" $home_arg \
+ $testdir/$tmpdir
+
+ env003_make_config $fulltmpdir
+
+ # Run the test again
+ env003_run_test a 2 "absolute path, config file" $home_arg \
+ $fulltmpdir
+
+ # Now we try without a config file, but instead with db_config
+ # relative paths
+ env003_run_test b 1 "relative path, db_config" "$home_arg \
+ -tmp_dir $tmpdir -data_dir ." \
+ $testdir/$tmpdir
+
+ # absolute paths
+ env003_run_test b 2 "absolute path, db_config" "$home_arg \
+ -tmp_dir $fulltmpdir -data_dir ." \
+ $fulltmpdir
+
+	# Now, set db_config -and- have a DB_CONFIG file, and make
+	# sure only the latter is honored.
+
+ file mkdir $testdir/bogus
+ env003_make_config $tmpdir
+
+ env003_run_test c 1 "relative path, both db_config and file" \
+ "$home_arg -tmp_dir $testdir/bogus -data_dir ." \
+ $testdir/$tmpdir
+
+ file mkdir $fulltmpdir/bogus
+ env003_make_config $fulltmpdir
+
+ env003_run_test c 2 "absolute path, both db_config and file" \
+ "$home_arg -tmp_dir $fulltmpdir/bogus -data_dir ." \
+ $fulltmpdir
+}
+
+proc env003_run_test { major minor msg env_args tmp_path} {
+ global testdir
+ global alphabet
+ global errorCode
+
+ puts "\t\tEnv003.$major.$minor: $msg"
+
+ # Create an environment and small-cached in-memory database to
+ # use.
+ set dbenv [eval {berkdb_env -create -home $testdir} $env_args \
+ {-cachesize {0 50000 1}}]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ set db [berkdb_open -env $dbenv -create -btree]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Fill the database with more than its cache can fit.
+ #
+ # When CONFIG_TEST is defined, the tempfile is left linked so
+ # we can check for its existence. Size the data to overfill
+ # the cache--the temp file is created lazily, so it is created
+ # when the cache overflows.
+ #
+ set key "key"
+ set data [repeat $alphabet 2000]
+ error_check_good db_put [$db put $key $data] 0
+
+ # Check for exactly one temp file.
+ set ret [glob -nocomplain $tmp_path/BDB*]
+ error_check_good temp_file_exists [llength $ret] 1
+
+ # Can't remove temp file until db is closed on Windows.
+ error_check_good db_close [$db close] 0
+ fileremove -f $ret
+ error_check_good env_close [$dbenv close] 0
+
+}
+
+proc env003_make_config { tmpdir } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_tmp_dir $tmpdir"
+ close $cid
+}
diff --git a/db-4.8.30/test/env004.tcl b/db-4.8.30/test/env004.tcl
new file mode 100644
index 0000000..7b49483
--- /dev/null
+++ b/db-4.8.30/test/env004.tcl
@@ -0,0 +1,94 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env004
+# TEST Test multiple data directories. Do a bunch of different opens
+# TEST to make sure that the files are detected in different directories.
+proc env004 { } {
+ source ./include.tcl
+
+ set method "hash"
+ set omethod [convert_method $method]
+ set args [convert_args $method ""]
+
+ puts "Env004: Multiple data directory test."
+
+ env_cleanup $testdir
+ file mkdir $testdir/data1
+ file mkdir $testdir/data2
+ file mkdir $testdir/data3
+
+ puts "\tEnv004.a: Multiple data directories in DB_CONFIG file"
+
+ # Create a config file
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_data_dir data1"
+ puts $cid "set_data_dir data2"
+ puts $cid "set_data_dir data3"
+ close $cid
+
+ set e [berkdb_env -create -private -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ ddir_test $method $e $args
+ error_check_good env_close [$e close] 0
+
+ puts "\tEnv004.b: Multiple data directories in berkdb_env call."
+ env_cleanup $testdir
+ file mkdir $testdir/data1
+ file mkdir $testdir/data2
+ file mkdir $testdir/data3
+
+ # Now call dbenv with config specified
+ set e [berkdb_env -create -private \
+ -data_dir . -data_dir data1 -data_dir data2 \
+ -data_dir data3 -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ ddir_test $method $e $args
+ error_check_good env_close [$e close] 0
+}
+
+proc ddir_test { method e args } {
+ source ./include.tcl
+
+ set args [convert_args $args]
+ set omethod [convert_method $method]
+
+ # Now create one file in each directory
+ set db1 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data1/datafile1.db}]
+ error_check_good dbopen1 [is_valid_db $db1] TRUE
+
+ set db2 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data2/datafile2.db}]
+ error_check_good dbopen2 [is_valid_db $db2] TRUE
+
+ set db3 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data3/datafile3.db}]
+ error_check_good dbopen3 [is_valid_db $db3] TRUE
+
+ # Close the files
+ error_check_good db_close1 [$db1 close] 0
+ error_check_good db_close2 [$db2 close] 0
+ error_check_good db_close3 [$db3 close] 0
+
+ # Now, reopen the files without complete pathnames and make
+ # sure that we find them.
+
+ set db1 [berkdb_open -env $e datafile1.db]
+ error_check_good dbopen1 [is_valid_db $db1] TRUE
+
+ set db2 [berkdb_open -env $e datafile2.db]
+ error_check_good dbopen2 [is_valid_db $db2] TRUE
+
+ set db3 [berkdb_open -env $e datafile3.db]
+ error_check_good dbopen3 [is_valid_db $db3] TRUE
+
+ # Finally close all the files
+ error_check_good db_close1 [$db1 close] 0
+ error_check_good db_close2 [$db2 close] 0
+ error_check_good db_close3 [$db3 close] 0
+}
diff --git a/db-4.8.30/test/env005.tcl b/db-4.8.30/test/env005.tcl
new file mode 100644
index 0000000..f0f14e2
--- /dev/null
+++ b/db-4.8.30/test/env005.tcl
@@ -0,0 +1,51 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env005
+# TEST Test that using subsystems without initializing them correctly
+# TEST returns an error. Cannot test mpool, because it is assumed in
+# TEST the Tcl code.
+proc env005 { } {
+ source ./include.tcl
+
+ puts "Env005: Uninitialized env subsystems test."
+
+ env_cleanup $testdir
+ puts "\tEnv005.a: Creating env with no subsystems."
+ set e [berkdb_env_noerr -create -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set db [berkdb_open -create -btree $testdir/env005.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rlist {
+ { "lock_detect" "Env005.b0"}
+ { "lock_get read 1 1" "Env005.b1"}
+ { "lock_id" "Env005.b2"}
+ { "lock_stat" "Env005.b3"}
+ { "lock_timeout 100" "Env005.b4"}
+ { "log_archive" "Env005.c0"}
+ { "log_cursor" "Env005.c1"}
+ { "log_file {1 1}" "Env005.c2"}
+ { "log_flush" "Env005.c3"}
+ { "log_put record" "Env005.c4"}
+ { "log_stat" "Env005.c5"}
+ { "txn" "Env005.d0"}
+ { "txn_checkpoint" "Env005.d1"}
+ { "txn_stat" "Env005.d2"}
+ { "txn_timeout 100" "Env005.d3"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+ set stat [catch {eval $e $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err [is_substr $ret invalid] 1
+ }
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$e close] 0
+}
diff --git a/db-4.8.30/test/env006.tcl b/db-4.8.30/test/env006.tcl
new file mode 100644
index 0000000..6f36d75
--- /dev/null
+++ b/db-4.8.30/test/env006.tcl
@@ -0,0 +1,90 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env006
+# TEST Make sure that all the utilities exist and run.
+# TEST Test that db_load -r options don't blow up.
+proc env006 { } {
+ source ./include.tcl
+
+ puts "Env006: Run underlying utilities."
+
+ set rlist {
+ { "db_archive" "Env006.a"}
+ { "db_checkpoint" "Env006.b"}
+ { "db_deadlock" "Env006.c"}
+ { "db_dump" "Env006.d"}
+ { "db_load" "Env006.e"}
+ { "db_printlog" "Env006.f"}
+ { "db_recover" "Env006.g"}
+ { "db_stat" "Env006.h"}
+ { "db_upgrade" "Env006.h"}
+ { "db_verify" "Env006.h"}
+ }
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+
+ puts "\t$msg: $cmd"
+
+ set stat [catch {exec $util_path/$cmd -?} ret]
+ error_check_good $cmd $stat 1
+
+ #
+ # Check for "usage", but only check "sage" so that
+ # we can handle either Usage or usage.
+ #
+ error_check_good $cmd.err [is_substr $ret sage] 1
+ }
+
+ env_cleanup $testdir
+ set env [eval berkdb_env -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set sub SUBDB
+ foreach case { noenv env } {
+ if { $case == "env" } {
+ set envargs " -env $env "
+ set homeargs " -h $testdir "
+ set testfile env006.db
+ } else {
+ set envargs ""
+ set homeargs ""
+ set testfile $testdir/env006.db
+ }
+
+ puts "\tEnv006.i: Testing db_load -r with $case."
+ set db [eval berkdb_open -create $envargs -btree $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set ret [eval \
+ exec $util_path/db_load -r lsn $homeargs $testfile]
+ error_check_good db_load_r_lsn $ret ""
+ set ret [eval \
+ exec $util_path/db_load -r fileid $homeargs $testfile]
+ error_check_good db_load_r_fileid $ret ""
+
+ error_check_good db_remove \
+ [eval {berkdb dbremove} $envargs $testfile] 0
+
+ puts "\tEnv006.j: Testing db_load -r with $case and subdbs."
+ set db [eval berkdb_open -create $envargs -btree $testfile $sub]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set ret [eval \
+ exec {$util_path/db_load} -r lsn $homeargs $testfile]
+ error_check_good db_load_r_lsn $ret ""
+ set ret [eval \
+ exec {$util_path/db_load} -r fileid $homeargs $testfile]
+ error_check_good db_load_r_fileid $ret ""
+
+ error_check_good \
+ db_remove [eval {berkdb dbremove} $envargs $testfile] 0
+ }
+ error_check_good env_close [$env close] 0
+}
diff --git a/db-4.8.30/test/env007.tcl b/db-4.8.30/test/env007.tcl
new file mode 100644
index 0000000..7091e8c
--- /dev/null
+++ b/db-4.8.30/test/env007.tcl
@@ -0,0 +1,701 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env007
+# TEST Test DB_CONFIG config file options for berkdb env.
+# TEST 1) Make sure command line option is respected
+# TEST 2) Make sure that config file option is respected
+# TEST 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
+# TEST method is used, only the file is respected.
+# TEST Then test all known config options.
+# TEST Also test config options on berkdb open. This isn't
+# TEST really env testing, but there's no better place to put it.
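+#
+# A rough sketch of the precedence rule exercised below (paths are
+# placeholders): if DB_CONFIG in the environment home contains
+#	set_tx_max 31
+# and the environment is opened with
+#	berkdb_env -create -txn -home $dir -txn_max 19
+# then [$env get_tx_max] is expected to report 31, the DB_CONFIG value.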
+proc env007 { } {
+ global errorInfo
+ global errorCode
+ global passwd
+ global has_crypto
+ source ./include.tcl
+
+ puts "Env007: DB_CONFIG and getters test."
+ puts "Env007.a: Test berkdb env options using getters and stat."
+
+ # Set up options we can check via stat or getters. Structure
+ # of the list is:
+ # 0. Arg used in berkdb env command
+ # 1. Arg used in DB_CONFIG file
+ # 2. Value assigned in berkdb env command
+ # 3. Value assigned in DB_CONFIG file
+ # 4. Message output during test
+ # 5. Stat command to run (empty if we can't get the info
+ # from stat).
+ # 6. String to search for in stat output
+	# 7. Which arg to check in stat (needed for cases where
+	#	we set more than one arg at a time, but stat can
+	#	only check one arg, like cachesize)
+ # 8. Arg used in getter
+ #
+ set rlist {
+ { " -txn_max " "set_tx_max" "19" "31"
+ "Env007.a1: Txn Max" "txn_stat"
+ "Maximum txns" "0" "get_tx_max" }
+ { " -lock_max_locks " "set_lk_max_locks" "1070" "1290"
+ "Env007.a2: Lock Max" "lock_stat"
+ "Maximum locks" "0" "get_lk_max_locks" }
+ { " -lock_max_lockers " "set_lk_max_lockers" "1500" "2000"
+ "Env007.a3: Max Lockers" "lock_stat"
+ "Maximum lockers" "0" "get_lk_max_lockers" }
+ { " -lock_max_objects " "set_lk_max_objects" "1500" "2000"
+ "Env007.a4: Max Objects" "lock_stat"
+ "Maximum objects" "0" "get_lk_max_objects" }
+ { " -log_buffer " "set_lg_bsize" "65536" "131072"
+ "Env007.a5: Log Bsize" "log_stat"
+ "Log record cache size" "0" "get_lg_bsize" }
+ { " -log_max " "set_lg_max" "8388608" "9437184"
+ "Env007.a6: Log Max" "log_stat"
+ "Current log file size" "0" "get_lg_max" }
+ { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1"
+ "Env007.a7.0: Cachesize" "mpool_stat"
+ "Cache size (gbytes)" "0" "get_cachesize" }
+ { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1"
+ "Env007.a7.1: Cachesize" "mpool_stat"
+ "Cache size (bytes)" "1" "get_cachesize" }
+ { " -cachesize " "set_cachesize" "0 536870912 1" "1 0 1"
+ "Env007.a7.2: Cachesize" "mpool_stat"
+ "Number of caches" "2" "get_cachesize" }
+ { " -lock_timeout " "set_lock_timeout" "100" "120"
+ "Env007.a8: Lock Timeout" "lock_stat"
+ "Lock timeout value" "0" "get_timeout lock" }
+ { " -log_regionmax " "set_lg_regionmax" "8388608" "4194304"
+ "Env007.a9: Log Regionmax" "log_stat"
+ "Region size" "0" "get_lg_regionmax" }
+ { " -mpool_max_openfd " "set_mp_max_openfd" "17" "27"
+ "Env007.a10: Mmap max openfd" "mpool_stat"
+ "Maximum open file descriptors" "0" "get_mp_max_openfd" }
+ { " -mpool_max_write " "set_mp_max_write" "37 47" "57 67"
+ "Env007.a11.0: Mmap max write" "mpool_stat"
+ "Maximum sequential buffer writes" "0" "get_mp_max_write" }
+ { " -mpool_max_write " "set_mp_max_write" "37 47" "57 67"
+ "Env007.a11.1: Mmap max write" "mpool_stat"
+ "Sleep after writing maximum buffers" "1" "get_mp_max_write" }
+ { " -mpool_mmap_size " "set_mp_mmapsize" "12582912" "8388608"
+ "Env007.a12: Mmapsize" "mpool_stat"
+ "Maximum memory-mapped file size" "0" "get_mp_mmapsize" }
+ { " -shm_key " "set_shm_key" "15" "35"
+ "Env007.a13: Shm Key" ""
+ "" "" "get_shm_key" }
+ { " -tmp_dir " "set_tmp_dir" "." "./TEMPDIR"
+ "Env007.a14: Temp dir" ""
+ "" "" "get_tmp_dir" }
+ { " -txn_timeout " "set_txn_timeout" "100" "120"
+ "Env007.a15: Txn timeout" "lock_stat"
+ "Transaction timeout value" "0" "get_timeout txn" }
+ { " -log_filemode " "set_lg_filemode" "417" "637"
+ "Env007.a16: Log FileMode" "log_stat"
+ "Log file mode" "0" "get_lg_filemode" }
+ }
+
+ set e "berkdb_env_noerr -create -mode 0644 -home $testdir -txn "
+ set qnxexclude {set_cachesize}
+
+ foreach item $rlist {
+ set envarg [lindex $item 0]
+ set configarg [lindex $item 1]
+ set envval [lindex $item 2]
+ set configval [lindex $item 3]
+ set msg [lindex $item 4]
+ set statcmd [lindex $item 5]
+ set statstr [lindex $item 6]
+ set index [lindex $item 7]
+ set getter [lindex $item 8]
+
+ if { $is_qnx_test &&
+ [lsearch $qnxexclude $configarg] != -1 } {
+ puts "\tEnv007.a: Skipping $configarg for QNX"
+ continue
+ }
+
+ env_cleanup $testdir
+
+ # First verify using just env args
+ puts "\t$msg Environment argument only"
+ set env [eval $e $envarg {$envval}]
+ error_check_good envopen:0 [is_valid_env $env] TRUE
+ error_check_good get_envval [eval $env $getter] $envval
+ if { $statcmd != "" } {
+ set statenvval [lindex $envval $index]
+ # log_stat reports the sum of the specified
+ # region size and the log buffer size.
+ if { $statstr == "Region size" } {
+ set lbufsize 32768
+ set statenvval [expr $statenvval + $lbufsize]
+ }
+ env007_check $env $statcmd $statstr $statenvval
+ }
+ error_check_good envclose:0 [$env close] 0
+
+ env_cleanup $testdir
+ env007_make_config $configarg $configval
+
+ # Verify using just config file
+ puts "\t$msg Config file only"
+ set env [eval $e]
+ error_check_good envopen:1 [is_valid_env $env] TRUE
+ error_check_good get_configval1 [eval $env $getter] $configval
+ if { $statcmd != "" } {
+ set statconfigval [lindex $configval $index]
+ if { $statstr == "Region size" } {
+ set statconfigval \
+ [expr $statconfigval + $lbufsize]
+ }
+ env007_check $env $statcmd $statstr $statconfigval
+ }
+ error_check_good envclose:1 [$env close] 0
+
+ # Now verify using env args and config args
+ puts "\t$msg Environment arg and config file"
+ set env [eval $e $envarg {$envval}]
+ error_check_good envopen:2 [is_valid_env $env] TRUE
+ # Getter should retrieve config val, not envval.
+ error_check_good get_configval2 [eval $env $getter] $configval
+ if { $statcmd != "" } {
+ env007_check $env $statcmd $statstr $statconfigval
+ }
+ error_check_good envclose:2 [$env close] 0
+ }
+
+ #
+ # Test all options that can be set in DB_CONFIG. Write it out
+ # to the file and make sure we can open the env. This execs
+ # the config file code. Also check with a getter that the
+ # expected value is returned.
+ #
+ puts "\tEnv007.b: Test berkdb env config options using getters\
+ and env open."
+
+ # The cfglist variable contains options that can be set in DB_CONFIG.
+ set cfglist {
+ { "set_data_dir" "." "get_data_dirs" "." }
+ { "set_flags" "db_auto_commit" "get_flags" "-auto_commit" }
+ { "set_flags" "db_cdb_alldb" "get_flags" "-cdb_alldb" }
+ { "set_flags" "db_direct_db" "get_flags" "-direct_db" }
+ { "set_flags" "db_nolocking" "get_flags" "-nolock" }
+ { "set_flags" "db_nommap" "get_flags" "-nommap" }
+ { "set_flags" "db_nopanic" "get_flags" "-nopanic" }
+ { "set_flags" "db_overwrite" "get_flags" "-overwrite" }
+ { "set_flags" "db_region_init" "get_flags" "-region_init" }
+ { "set_flags" "db_txn_nosync" "get_flags" "-nosync" }
+ { "set_flags" "db_txn_write_nosync" "get_flags" "-wrnosync" }
+ { "set_flags" "db_yieldcpu" "get_flags" "-yield" }
+ { "set_lg_bsize" "65536" "get_lg_bsize" "65536" }
+ { "set_lg_dir" "." "get_lg_dir" "." }
+ { "set_lg_max" "8388608" "get_lg_max" "8388608" }
+ { "set_lg_regionmax" "262144" "get_lg_regionmax" "262144" }
+ { "set_lk_detect" "db_lock_default" "get_lk_detect" "default" }
+ { "set_lk_detect" "db_lock_expire" "get_lk_detect" "expire" }
+ { "set_lk_detect" "db_lock_maxlocks" "get_lk_detect" "maxlocks" }
+ { "set_lk_detect" "db_lock_minlocks" "get_lk_detect" "minlocks" }
+ { "set_lk_detect" "db_lock_minwrite" "get_lk_detect" "minwrite" }
+ { "set_lk_detect" "db_lock_oldest" "get_lk_detect" "oldest" }
+ { "set_lk_detect" "db_lock_random" "get_lk_detect" "random" }
+ { "set_lk_detect" "db_lock_youngest" "get_lk_detect" "youngest" }
+ { "set_lk_max_lockers" "1500" "get_lk_max_lockers" "1500" }
+ { "set_lk_max_locks" "1290" "get_lk_max_locks" "1290" }
+ { "set_lk_max_objects" "1500" "get_lk_max_objects" "1500" }
+ { "set_lock_timeout" "100" "get_timeout lock" "100" }
+ { "set_mp_mmapsize" "12582912" "get_mp_mmapsize" "12582912" }
+ { "set_mp_max_write" "10 20" "get_mp_max_write" "10 20" }
+ { "set_mp_max_openfd" "10" "get_mp_max_openfd" "10" }
+ { "set_region_init" "1" "get_flags" "-region_init" }
+ { "set_shm_key" "15" "get_shm_key" "15" }
+ { "set_tas_spins" "15" "get_tas_spins" "15" }
+ { "set_tmp_dir" "." "get_tmp_dir" "." }
+ { "set_tx_max" "31" "get_tx_max" "31" }
+ { "set_txn_timeout" "50" "get_timeout txn" "50" }
+ { "set_verbose" "db_verb_deadlock" "get_verbose deadlock" "on" }
+ { "set_verbose" "db_verb_fileops" "get_verbose fileops" "on" }
+ { "set_verbose" "db_verb_fileops_all" "get_verbose fileops_all" "on" }
+ { "set_verbose" "db_verb_recovery" "get_verbose recovery" "on" }
+ { "set_verbose" "db_verb_register" "get_verbose register" "on" }
+ { "set_verbose" "db_verb_replication" "get_verbose rep" "on" }
+ { "set_verbose" "db_verb_waitsfor" "get_verbose wait" "on" }
+ }
+
+ env_cleanup $testdir
+ set e "berkdb_env_noerr -create -mode 0644 -home $testdir -txn"
+ set directlist {db_direct_db}
+
+ foreach item $cfglist {
+ env_cleanup $testdir
+ set configarg [lindex $item 0]
+ set configval [lindex $item 1]
+ set getter [lindex $item 2]
+ set getval [lindex $item 3]
+
+ env007_make_config $configarg $configval
+
+ # Verify using config file
+ puts "\t\tEnv007.b: $configarg $configval"
+
+ # Unconfigured/unsupported direct I/O is not reported
+ # as a failure.
+ set directmsg \
+ "direct I/O either not configured or not supported"
+ if {[catch { eval $e } env ]} {
+ if { [lsearch $directlist $configval] != -1 && \
+ [is_substr $env $directmsg] == 1 } {
+ continue
+ } else {
+ puts "FAIL: $env"
+ continue
+ }
+ }
+ error_check_good envvalid:1 [is_valid_env $env] TRUE
+ error_check_good getter:1 [eval $env $getter] $getval
+ error_check_good envclose:1 [$env close] 0
+ }
+
+ puts "\tEnv007.c: Test berkdb env options using getters and env open."
+ # The envopenlist variable contains options that can be set using
+ # berkdb env. We always set -mpool.
+# { "-system_mem" "-shm_key 1" "-system_mem" "get_open_flags" }
+ set envopenlist {
+ { "-cdb" "" "-cdb" "get_open_flags" }
+ { "-errpfx" "FOO" "FOO" "get_errpfx" }
+ { "-lock" "" "-lock" "get_open_flags" }
+ { "-log" "" "-log" "get_open_flags" }
+ { "" "" "-mpool" "get_open_flags" }
+ { "-txn" "" "-txn" "get_open_flags" }
+ { "-recover" "-txn" "-recover" "get_open_flags" }
+ { "-recover_fatal" "-txn" "-recover_fatal" "get_open_flags" }
+ { "-register" "-txn -recover" "-register" "get_open_flags" }
+ { "-use_environ" "" "-use_environ" "get_open_flags" }
+ { "-use_environ_root" "" "-use_environ_root" "get_open_flags" }
+ { "" "" "-create" "get_open_flags" }
+ { "-private" "" "-private" "get_open_flags" }
+ { "-thread" "" "-thread" "get_open_flags" }
+ { "-txn_timestamp" "100000000" "100000000" "get_tx_timestamp" }
+ }
+
+ if { $has_crypto == 1 } {
+ lappend envopenlist {
+ "-encryptaes" "$passwd" "-encryptaes" "get_encrypt_flags" }
+ }
+
+ set e "berkdb_env_noerr -create -mode 0644 -home $testdir"
+ set qnxexclude {-system_mem}
+ foreach item $envopenlist {
+ env_cleanup $testdir
+ set envarg [lindex $item 0]
+ set envval [lindex $item 1]
+ set retval [lindex $item 2]
+ set getter [lindex $item 3]
+
+ if { $is_qnx_test &&
+ [lsearch $qnxexclude $envarg] != -1} {
+ puts "\t\tEnv007: Skipping $envarg for QNX"
+ continue
+ }
+
+ puts "\t\tEnv007.c: $envarg $retval"
+
+ # Set up env
+ set ret [catch {eval $e $envarg $envval} env]
+
+ if { $ret != 0 } {
+ # If the env open failed, it may be because we're on a
+ # platform such as HP-UX 10 that won't support mutexes
+ # in shmget memory. Verify that the return value was
+ # EINVAL or EOPNOTSUPP and bail gracefully.
+ error_check_good \
+ is_shm_test [is_substr $envarg -system_mem] 1
+ error_check_good returned_error [expr \
+ [is_substr $errorCode EINVAL] || \
+ [is_substr $errorCode EOPNOTSUPP]] 1
+ puts "Warning: platform\
+ does not support mutexes in shmget memory."
+ puts "Skipping shared memory mpool test."
+ } else {
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Check that getter retrieves expected retval.
+ set get_retval [eval $env $getter]
+ if { [is_substr $get_retval $retval] != 1 } {
+ puts "FAIL: $retval\
+ should be a substring of $get_retval"
+ continue
+ }
+ error_check_good envclose [$env close] 0
+
+ # The -encryptany flag can only be tested on an existing
+ # environment that supports encryption, so do it here.
+ if { $has_crypto == 1 } {
+ if { $envarg == "-encryptaes" } {
+ set env [eval berkdb_env -home $testdir\
+ -encryptany $passwd]
+ error_check_good get_encryptany \
+ [eval $env get_encrypt_flags] \
+ "-encryptaes"
+ error_check_good envclose [$env close] 0
+ }
+ }
+ }
+ }
+
+ puts "\tEnv007.d: Test berkdb env options using set_flags and getters."
+
+ # The flaglist variable contains options that can be set using
+ # $env set_flags.
+ set flaglist {
+ { "-direct_db" }
+ { "-nolock" }
+ { "-nommap" }
+ { "-nopanic" }
+ { "-nosync" }
+ { "-overwrite" }
+ { "-panic" }
+ { "-wrnosync" }
+ }
+ set e "berkdb_env_noerr -create -mode 0644 -home $testdir"
+ set directlist {-direct_db}
+ foreach item $flaglist {
+ set flag [lindex $item 0]
+ env_cleanup $testdir
+
+ # Set up env
+ set env [eval $e]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # Use set_flags to turn on new env characteristics.
+ #
+ # Unconfigured/unsupported direct I/O is not reported
+ # as a failure.
+ if {[catch { $env set_flags $flag on } res ]} {
+ if { [lsearch $directlist $flag] != -1 && \
+ [is_substr $res $directmsg] == 1 } {
+ error_check_good env_close [$env close] 0
+ continue
+ } else {
+ puts "FAIL: $res"
+ error_check_good env_close [$env close] 0
+ continue
+ }
+ } else {
+ error_check_good "flag $flag on" $res 0
+ }
+
+ # Check that getter retrieves expected retval.
+ set get_retval [eval $env get_flags]
+ if { [is_substr $get_retval $flag] != 1 } {
+ puts "FAIL: $flag should be a substring of $get_retval"
+ error_check_good env_close [$env close] 0
+ continue
+ }
+ # Use set_flags to turn off env characteristics, make sure
+ # they are gone.
+ error_check_good "flag $flag off" [$env set_flags $flag off] 0
+ set get_retval [eval $env get_flags]
+ if { [is_substr $get_retval $flag] == 1 } {
+ puts "FAIL: $flag should not be in $get_retval"
+ error_check_good env_close [$env close] 0
+ continue
+ }
+
+ error_check_good envclose [$env close] 0
+ }
+ puts "\tEnv007.d1: Test berkdb env options using set_log_config and getters."
+
+ # The flaglist variable contains options that can be set using
+ # $env log_config.
+ set flaglist {
+ { "autoremove" }
+ { "direct" }
+ { "dsync" }
+ { "zero" }
+ }
+ set e "berkdb_env_noerr -create -txn -mode 0644 -home $testdir"
+ set directlist {direct}
+ foreach item $flaglist {
+ set flag [lindex $item 0]
+ env_cleanup $testdir
+
+ # Set up env
+ set env [eval $e]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+		# Use log_config to turn on new log characteristics.
+ #
+ # Unconfigured/unsupported direct I/O is not reported
+ # as a failure.
+ if {[catch { $env log_config "$flag on" } res ]} {
+ if { [lsearch $directlist $flag] != -1 && \
+ [is_substr $res $directmsg] == 1 } {
+ error_check_good env_close [$env close] 0
+ continue
+ } else {
+ puts "FAIL: $res"
+ error_check_good env_close [$env close] 0
+ continue
+ }
+ } else {
+ error_check_good "flag $flag on" $res 0
+ }
+
+ # Check that getter retrieves expected retval.
+ set get_retval [eval $env log_get_config $flag]
+ if { $get_retval != 1 } {
+ puts "FAIL: $flag is not on"
+ error_check_good env_close [$env close] 0
+ continue
+ }
+		# Use log_config to turn off log characteristics and make
+		# sure they are gone.
+ error_check_good "flag $flag off" [$env log_config "$flag off"] 0
+ set get_retval [eval $env log_get_config $flag]
+ if { $get_retval == 1 } {
+			puts "FAIL: $flag should be off"
+ error_check_good env_close [$env close] 0
+ continue
+ }
+
+ error_check_good envclose [$env close] 0
+ }
+
+ puts "\tEnv007.e: Test env get_home."
+ env_cleanup $testdir
+ # Set up env
+ set env [eval $e]
+ error_check_good env_open [is_valid_env $env] TRUE
+ # Test for correct value.
+ set get_retval [eval $env get_home]
+ error_check_good get_home $get_retval $testdir
+ error_check_good envclose [$env close] 0
+
+ puts "\tEnv007.f: Test that bad config values are rejected."
+ set cfglist {
+ { "set_cachesize" "1048576" }
+ { "set_flags" "db_xxx" }
+ { "set_flags" "1" }
+ { "set_flags" "db_txn_nosync x" }
+ { "set_lg_bsize" "db_xxx" }
+ { "set_lg_max" "db_xxx" }
+ { "set_lg_regionmax" "db_xxx" }
+ { "set_lk_detect" "db_xxx" }
+ { "set_lk_detect" "1" }
+ { "set_lk_detect" "db_lock_youngest x" }
+ { "set_lk_max_locks" "db_xxx" }
+ { "set_lk_max_lockers" "db_xxx" }
+ { "set_lk_max_objects" "db_xxx" }
+ { "set_mp_max_openfd" "1 2" }
+ { "set_mp_max_write" "1 2 3" }
+ { "set_mp_mmapsize" "db_xxx" }
+ { "set_region_init" "db_xxx" }
+ { "set_shm_key" "db_xxx" }
+ { "set_tas_spins" "db_xxx" }
+ { "set_tx_max" "db_xxx" }
+ { "set_verbose" "db_xxx" }
+ { "set_verbose" "1" }
+ { "set_verbose" "db_verb_recovery x" }
+ }
+
+ set e "berkdb_env_noerr -create -mode 0644 \
+ -home $testdir -log -lock -txn "
+ foreach item $cfglist {
+ set configarg [lindex $item 0]
+ set configval [lindex $item 1]
+
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ set stat [catch {eval $e} ret]
+ error_check_good envopen $stat 1
+ error_check_good error [is_substr $errorCode EINVAL] 1
+ }
+
+ puts "\tEnv007.g: Config name error set_xxx"
+ set e "berkdb_env_noerr -create -mode 0644 \
+ -home $testdir -log -lock -txn "
+ env007_make_config "set_xxx" 1
+ set stat [catch {eval $e} ret]
+ error_check_good envopen $stat 1
+ error_check_good error [is_substr $errorInfo \
+ "unrecognized name-value pair"] 1
+
+ puts "\tEnv007.h: Test berkdb open flags and getters."
+ # Check options that we configure with berkdb open and
+ # query via getters. Structure of the list is:
+ # 0. Flag used in berkdb open command
+ # 1. Value specified to flag
+ # 2. Specific method, if needed
+ # 3. Arg used in getter
+ set olist {
+ { "-minkey" "4" " -btree " "get_bt_minkey" }
+ { "-cachesize" "0 1048576 1" "" "get_cachesize" }
+ { "" "FILENAME DBNAME" "" "get_dbname" }
+ { "" "" "" "get_env" }
+ { "-errpfx" "ERROR:" "" "get_errpfx" }
+ { "" "-chksum" "" "get_flags" }
+ { "-delim" "58" "-recno" "get_re_delim" }
+ { "" "-dup" "" "get_flags" }
+ { "" "-dup -dupsort" "" "get_flags" }
+ { "" "-recnum" "" "get_flags" }
+ { "" "-revsplitoff" "" "get_flags" }
+ { "" "-renumber" "-recno" "get_flags" }
+ { "" "-snapshot" "-recno" "get_flags" }
+ { "" "-create" "" "get_open_flags" }
+ { "" "-create -read_uncommitted" "" "get_open_flags" }
+ { "" "-create -excl" "" "get_open_flags" }
+ { "" "-create -nommap" "" "get_open_flags" }
+ { "" "-create -thread" "" "get_open_flags" }
+ { "" "-create -truncate" "" "get_open_flags" }
+ { "-ffactor" "40" " -hash " "get_h_ffactor" }
+ { "-lorder" "4321" "" "get_lorder" }
+ { "-nelem" "10000" " -hash " "get_h_nelem" }
+ { "-pagesize" "4096" "" "get_pagesize" }
+ { "-extent" "4" "-queue" "get_q_extentsize" }
+ { "-len" "20" "-recno" "get_re_len" }
+ { "-pad" "0" "-recno" "get_re_pad" }
+ { "-source" "include.tcl" "-recno" "get_re_source" }
+ }
+
+ set o "berkdb_open_noerr -create -mode 0644"
+ foreach item $olist {
+ cleanup $testdir NULL
+ set flag [lindex $item 0]
+ set flagval [lindex $item 1]
+ set method [lindex $item 2]
+ if { $method == "" } {
+ set method " -btree "
+ }
+ set getter [lindex $item 3]
+
+ puts "\t\tEnv007.h: $flag $flagval"
+
+ # Check that open is successful with the flag.
+ # The option -cachesize requires grouping for $flagval.
+ if { $flag == "-cachesize" } {
+ set ret [catch {eval $o $method $flag {$flagval}\
+ $testdir/a.db} db]
+ } else {
+ set ret [catch {eval $o $method $flag $flagval\
+ $testdir/a.db} db]
+ }
+ if { $ret != 0 } {
+ # If the open failed, it may be because we're on a
+ # platform such as HP-UX 10 that won't support
+ # locks in process-local memory.
+			# Verify that the return value was EINVAL or EOPNOTSUPP
+ # and bail gracefully.
+ error_check_good \
+ is_thread_test [is_substr $flagval -thread] 1
+ error_check_good returned_error [expr \
+ [is_substr $errorCode EINVAL] || \
+ [is_substr $errorCode EOPNOTSUPP]] 1
+ puts "Warning: platform does not support\
+ locks inside process-local memory."
+ puts "Skipping test of -thread flag."
+ } else {
+ error_check_good dbopen:0 [is_valid_db $db] TRUE
+
+ # Check that getter retrieves the correct value.
+ # Cachesizes under 500MB are adjusted upward to
+			# Cachesizes under 500MB are adjusted upward by
+			# about 25%, so just make sure we're in the right
+ if { $flag == "-cachesize" } {
+ set retval [eval $db $getter]
+ set retbytes [lindex $retval 1]
+ set setbytes [lindex $flagval 1]
+ error_check_good cachesize_low [expr\
+ $retbytes > [expr $setbytes * 6 / 5]] 1
+ error_check_good cachesize_high [expr\
+ $retbytes < [expr $setbytes * 13 / 10]] 1
+ } else {
+ error_check_good get_flagval \
+ [eval $db $getter] $flagval
+ }
+ error_check_good dbclose:0 [$db close] 0
+ }
+ }
+
+ puts "\tEnv007.i: Test berkdb_open -rdonly."
+ # This test is done separately because -rdonly can only be specified
+ # on an already existing database.
+ set flag "-rdonly"
+ set db [eval berkdb_open $flag $testdir/a.db]
+ error_check_good open_rdonly [is_valid_db $db] TRUE
+
+ error_check_good get_rdonly [eval $db get_open_flags] $flag
+ error_check_good dbclose:0 [$db close] 0
+
+ puts "\tEnv007.j: Test berkdb open flags and getters\
+ requiring environments."
+ # Check options that we configure with berkdb open and
+ # query via getters. Structure of the list is:
+ # 0. Flag used in berkdb open command
+ # 1. Value specified to flag
+ # 2. Specific method, if needed
+ # 3. Arg used in getter
+ # 4. Additional flags needed in setting up env
+
+ set elist {
+ { "" "-auto_commit" "" "get_open_flags" "" }
+ }
+
+ if { $has_crypto == 1 } {
+ lappend elist \
+ { "" "-encrypt" "" "get_flags" "-encryptaes $passwd" }
+ }
+
+ set e "berkdb_env -create -home $testdir -txn "
+ set o "berkdb_open -create -btree -mode 0644 "
+ foreach item $elist {
+ env_cleanup $testdir
+ set flag [lindex $item 0]
+ set flagval [lindex $item 1]
+ set method [lindex $item 2]
+ if { $method == "" } {
+ set method " -btree "
+ }
+ set getter [lindex $item 3]
+ set envflag [lindex $item 4]
+
+ # Check that open is successful with the flag.
+ set env [eval $e $envflag]
+ set db [eval $o -env $env $flag $flagval a.db]
+ error_check_good dbopen:0 [is_valid_db $db] TRUE
+
+ # Check that getter retrieves the correct value
+ set get_flagval [eval $db $getter]
+ error_check_good get_flagval [is_substr $get_flagval $flagval] 1
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+ }
+}
+
+proc env007_check { env statcmd statstr testval } {
+ set stat [$env $statcmd]
+ set checked 0
+ foreach statpair $stat {
+ if {$checked == 1} {
+ break
+ }
+ set statmsg [lindex $statpair 0]
+ set statval [lindex $statpair 1]
+ if {[is_substr $statmsg $statstr] != 0} {
+ set checked 1
+ error_check_good $statstr:ck $statval $testval
+ }
+ }
+ error_check_good $statstr:test $checked 1
+}
+
+proc env007_make_config { carg cval } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "$carg $cval"
+ close $cid
+}
diff --git a/db-4.8.30/test/env008.tcl b/db-4.8.30/test/env008.tcl
new file mode 100644
index 0000000..8ebb885
--- /dev/null
+++ b/db-4.8.30/test/env008.tcl
@@ -0,0 +1,72 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env008
+# TEST Test environments and subdirectories.
+proc env008 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ env_cleanup $testdir
+
+ set subdir 1/1
+ set subdir1 1/2
+ file mkdir $testdir/$subdir $testdir/$subdir1
+ set testfile $subdir/env.db
+
+ puts "Env008: Test of environments and subdirectories."
+
+ puts "\tEnv008.a: Create env and db."
+ set env [berkdb_env -create -mode 0644 -home $testdir -txn]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tEnv008.b: Remove db in subdir."
+ env008_db $env $testfile
+ error_check_good dbremove:$testfile \
+ [berkdb dbremove -env $env $testfile] 0
+
+ #
+	# Rather than remaking the db for every rename, just keep
+	# renaming the same file to a new name each time.
+ #
+ puts "\tEnv008.c: Rename db in subdir."
+ env008_db $env $testfile
+ set newfile $subdir/new.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.d: Rename db to parent dir."
+ set newfile $subdir/../new.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.e: Rename db to child dir."
+ set newfile $subdir/env.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.f: Rename db to another dir."
+ set newfile $subdir1/env.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+
+ error_check_good envclose [$env close] 0
+ puts "\tEnv008 complete."
+}
+
+proc env008_db { env testfile } {
+ set db [berkdb_open -env $env -create -btree $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db put key data]
+ error_check_good dbput $ret 0
+ error_check_good dbclose [$db close] 0
+}
diff --git a/db-4.8.30/test/env009.tcl b/db-4.8.30/test/env009.tcl
new file mode 100644
index 0000000..446ae80
--- /dev/null
+++ b/db-4.8.30/test/env009.tcl
@@ -0,0 +1,81 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env009
+# TEST Test calls to all the various stat functions. We have several
+# TEST sprinkled throughout the test suite, but this will ensure that
+# TEST we run all of them at least once.
+proc env009 { } {
+ source ./include.tcl
+
+ puts "Env009: Various stat functions test."
+
+ env_cleanup $testdir
+ puts "\tEnv009.a: Setting up env and a database."
+
+ set e [berkdb_env -create -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set dbbt [berkdb_open -create -btree $testdir/env009bt.db]
+ error_check_good dbopen [is_valid_db $dbbt] TRUE
+ set dbh [berkdb_open -create -hash $testdir/env009h.db]
+ error_check_good dbopen [is_valid_db $dbh] TRUE
+ set dbq [berkdb_open -create -queue $testdir/env009q.db]
+ error_check_good dbopen [is_valid_db $dbq] TRUE
+
+ puts "\tEnv009.b: Setting up replication master and client envs."
+ replsetup $testdir/MSGQUEUEDIR
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ repladd 1
+ set repenv(M) [berkdb_env -create -home $masterdir \
+ -txn -rep_master -rep_transport [list 1 replsend]]
+ repladd 2
+ set repenv(C) [berkdb_env -create -home $clientdir \
+ -txn -rep_client -rep_transport [list 2 replsend]]
+
+ set rlist {
+ { "lock_stat" "Maximum locks" "Env009.c" $e }
+ { "log_stat" "Magic" "Env009.d" "$e" }
+ { "mpool_stat" "Number of caches" "Env009.e" "$e"}
+ { "txn_stat" "Maximum txns" "Env009.f" "$e" }
+ { "rep_stat" "{Environment ID} 1" "Env009.g (Master)" "$repenv(M)"}
+ { "rep_stat" "{Environment ID} 2" "Env009.h (Client)" "$repenv(C)"}
+ }
+
+ foreach set $rlist {
+ set cmd [lindex $set 0]
+ set str [lindex $set 1]
+ set msg [lindex $set 2]
+ set env [lindex $set 3]
+ puts "\t$msg: $cmd"
+ set ret [eval $env $cmd]
+ error_check_good $cmd [is_substr $ret $str] 1
+ }
+
+ puts "\tEnv009.i: btree stats"
+ set ret [$dbbt stat]
+ error_check_good $cmd [is_substr $ret "Leaf pages"] 1
+
+ puts "\tEnv009.j: hash stats"
+ set ret [$dbh stat]
+ error_check_good $cmd [is_substr $ret "Buckets"] 1
+
+ puts "\tEnv009.k: queue stats"
+ set ret [$dbq stat]
+ error_check_good $cmd [is_substr $ret "Extent size"] 1
+
+ # Clean up.
+ error_check_good dbclose [$dbbt close] 0
+ error_check_good dbclose [$dbh close] 0
+ error_check_good dbclose [$dbq close] 0
+ error_check_good masterenvclose [$repenv(M) close] 0
+ error_check_good clientenvclose [$repenv(C) close] 0
+ replclose $testdir/MSGQUEUEDIR
+ error_check_good envclose [$e close] 0
+}
diff --git a/db-4.8.30/test/env010.tcl b/db-4.8.30/test/env010.tcl
new file mode 100644
index 0000000..efcd379
--- /dev/null
+++ b/db-4.8.30/test/env010.tcl
@@ -0,0 +1,49 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env010
+# TEST Run recovery in an empty directory, and then make sure we can still
+# TEST create a database in that directory.
+proc env010 { } {
+ source ./include.tcl
+
+ puts "Env010: Test of recovery in an empty directory."
+
+ # Create a new directory used only for this test
+
+ if { [file exists $testdir/EMPTYDIR] != 1 } {
+ file mkdir $testdir/EMPTYDIR
+ } else {
+ puts "\nDirectory already exists."
+ }
+
+ # Do the test twice, for regular recovery and catastrophic
+ # Open environment and recover, but don't create a database
+
+ foreach rmethod {recover recover_fatal} {
+
+ puts "\tEnv010: Creating env for $rmethod test."
+ env_cleanup $testdir/EMPTYDIR
+ set e [berkdb_env \
+ -create -home $testdir/EMPTYDIR -txn -$rmethod]
+ error_check_good dbenv [is_valid_env $e] TRUE
+
+ # Open and close a database
+ # The method doesn't matter, so picked btree arbitrarily
+
+ set db [eval {berkdb_open -env $e \
+ -btree -create -mode 0644} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ # Close environment
+
+ error_check_good envclose [$e close] 0
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir/EMPTYDIR] 0
+ }
+ puts "\tEnv010 complete."
+}
diff --git a/db-4.8.30/test/env011.tcl b/db-4.8.30/test/env011.tcl
new file mode 100644
index 0000000..1f79372
--- /dev/null
+++ b/db-4.8.30/test/env011.tcl
@@ -0,0 +1,38 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env011
+# TEST Run with region overwrite flag.
+proc env011 { } {
+ source ./include.tcl
+
+ puts "Env011: Test of region overwriting."
+ env_cleanup $testdir
+
+ puts "\tEnv011: Creating/closing env for open test."
+ set e [berkdb_env -create -overwrite -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set db [eval \
+ {berkdb_open -auto_commit -env $e -btree -create -mode 0644} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [eval {$db put} "aaa" "data"]
+ error_check_good put $ret 0
+ set ret [eval {$db put} "bbb" "data"]
+ error_check_good put $ret 0
+ error_check_good db_close [$db close] 0
+ error_check_good envclose [$e close] 0
+
+ puts "\tEnv011: Opening the environment with overwrite set."
+ set e [berkdb_env -create -overwrite -home $testdir -txn -recover]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ error_check_good envclose [$e close] 0
+
+ puts "\tEnv011: Removing the environment with overwrite set."
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir -overwrite] 0
+
+ puts "\tEnv011 complete."
+}
diff --git a/db-4.8.30/test/env012.tcl b/db-4.8.30/test/env012.tcl
new file mode 100644
index 0000000..895dc16
--- /dev/null
+++ b/db-4.8.30/test/env012.tcl
@@ -0,0 +1,393 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env012
+# TEST Test DB_REGISTER.
+# TEST
+# TEST DB_REGISTER will fail on systems without fcntl. If it
+# TEST fails, make sure we got the expected DB_OPNOTSUP return.
+# TEST
+# TEST Then, the real tests:
+# TEST For each test, we start a process that opens an env with -register.
+# TEST
+# TEST 1. Verify that a 2nd process can enter the existing env with -register.
+# TEST
+# TEST 2. Kill the 1st process, and verify that the 2nd process can enter
+# TEST with "-register -recover".
+# TEST
+# TEST 3. Kill the 1st process, and verify that the 2nd process cannot
+# TEST enter with just "-register".
+# TEST
+# TEST 4. While the 1st process is still running, a 2nd process enters
+# TEST with "-register". Kill the 1st process. Verify that a 3rd process
+# TEST can enter with "-register -recover". Verify that the 3rd process,
+# TEST entering, causes process 2 to fail with the message DB_RUNRECOVERY.
+# TEST
+# TEST 5. We had a bug where recovery was always run with -register
+# TEST if there were empty slots in the process registry file. Verify
+# TEST that recovery doesn't automatically run if there is an empty slot.
+# TEST
+# TEST 6. Verify process cannot connect when specifying -failchk and an
+# TEST isalive function has not been declared.
+# TEST
+# TEST 7. Verify that a 2nd process can enter the existing env with -register
+# TEST and -failchk and having specified an isalive function
+# TEST
+# TEST 8. Kill the 1st process, and verify that the 2nd process can enter
+# TEST with "-register -failchk -recover"
+# TEST
+# TEST 9. 2nd process enters with "-register -failchk". Kill the 1st process.
+# TEST 2nd process may get blocked on a mutex held by process one. Verify
+# TEST 3rd process can enter with "-register -recover -failchk". 3rd process
+# TEST should run failchk, clear out open txn/log from process 1. It will
+# TEST enter env without need for any additional recovery. We look for
+# TEST "Freeing log information .." sentence in the log for 3rd process as
+# TEST an indication that failchk ran. If DB_RUNRECOVERY were returned
+# TEST instead it would mean failchk could not recover.
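+#
+# A rough sketch of the opens the child processes perform (see
+# envscript.tcl for the real driver; the flags shown are illustrative):
+#	# first process, may run recovery
+#	berkdb_env -create -home $dir -txn -register -recover
+#	# later processes join the same environment
+#	berkdb_env -create -home $dir -txn -register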
+
+proc env012 { } {
+ source ./include.tcl
+ set tnum "012"
+
+ puts "Env$tnum: Test of DB_REGISTER."
+
+ puts "\tEnv$tnum.a: Platforms without fcntl fail with DB_OPNOTSUP."
+ env_cleanup $testdir
+ if {[catch {eval {berkdb_env} \
+ -create -home $testdir -txn -register -recover} env]} {
+ error_check_good fail_OPNOTSUP [is_substr $env DB_OPNOTSUP] 1
+		puts "Skipping env$tnum; DB_REGISTER is not supported."
+		return
+	}
+ error_check_good env_close [$env close] 0
+
+ puts "\tEnv$tnum.b: Second process can join with -register."
+ env_cleanup $testdir
+ set testfile TESTFILE
+ set key KEY
+ set data DATA1
+
+ puts "\t\tEnv$tnum.b1: Start process 1."
+ set p1 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p1 \
+ $testdir $testfile PUT $key $data RECOVER 0 10 &]
+
+ # Wait a while so process 1 has a chance to get going.
+ tclsleep 2
+
+ puts "\t\tEnv$tnum.b2: Start process 2."
+ set p2 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p2 \
+ $testdir $testfile GET $key $data 0 0 0 &]
+
+ watch_procs $p1 1 120
+ watch_procs $p2 1 120
+
+ # Check log files for failures.
+ logcheck $testdir/env$tnum.log.p1
+ logcheck $testdir/env$tnum.log.p2
+
+ puts "\tEnv$tnum.c: Second process can join with -register\
+ -recover after first process is killed."
+ env_cleanup $testdir
+
+ puts "\t\tEnv$tnum.c1: Start process 1."
+ set pids {}
+ set p1 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p1 \
+ $testdir $testfile PUT $key $data RECOVER 0 10 &]
+ lappend pids $p1
+ tclsleep 2
+
+ puts "\t\tEnv$tnum.c2: Kill process 1."
+ set pids [findprocessids $testdir $pids]
+ foreach pid $pids {
+ tclkill $pid
+ }
+
+ puts "\t\tEnv$tnum.c3: Start process 2."
+ set p2 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p2 \
+ $testdir $testfile GET $key $data RECOVER 0 0 &]
+
+ watch_procs $p2 1 120
+
+ # Check log files for failures.
+ logcheck $testdir/env$tnum.log.p1
+ logcheck $testdir/env$tnum.log.p2
+
+ if { $is_windows_test == 1 } {
+ puts "Skipping sections .d and .e on Windows platform."
+ } else {
+ puts "\tEnv$tnum.d: Second process cannot join without -recover\
+ after first process is killed."
+ env_cleanup $testdir
+
+ puts "\t\tEnv$tnum.d1: Start process 1."
+ set pids {}
+ set p1 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p1 \
+ $testdir $testfile PUT $key $data RECOVER 0 10 &]
+ lappend pids $p1
+ tclsleep 2
+
+ puts "\t\tEnv$tnum.d2: Kill process 1."
+ set pids [findprocessids $testdir $pids]
+ foreach pid $pids {
+ tclkill $pid
+ }
+
+ puts "\t\tEnv$tnum.d3: Start process 2."
+ set p2 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p2 \
+ $testdir $testfile GET $key $data 0 0 0 &]
+ tclsleep 2
+ watch_procs $p2 1 120
+
+ # Check log files. Log p1 should be clean, but we
+ # expect DB_RUNRECOVERY in log p2.
+ logcheck $testdir/env$tnum.log.p1
+ logcheckfails $testdir/env$tnum.log.p2 DB_RUNRECOVERY
+
+ puts "\tEnv$tnum.e: Running registered process detects failure."
+ env_cleanup $testdir
+
+ puts "\t\tEnv$tnum.e1: Start process 1."
+ set pids {}
+ set p1 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p1 \
+ $testdir $testfile PUT $key $data RECOVER 0 10 &]
+ lappend pids $p1
+ tclsleep 2
+
+ # Identify child process to kill later.
+ set pids [findprocessids $testdir $pids]
+
+ puts "\t\tEnv$tnum.e2: Start process 2."
+ set p2 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p2 \
+ $testdir $testfile LOOP $key $data 0 0 10 &]
+
+ puts "\t\tEnv$tnum.e3: Kill process 1."
+ foreach pid $pids {
+ tclkill $pid
+ }
+
+ puts "\t\tEnv$tnum.e4: Start process 3."
+ set p3 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p3 \
+ $testdir $testfile GET $key $data RECOVER 0 0 &]
+ tclsleep 2
+
+ watch_procs $p2 1 120
+ watch_procs $p3 1 120
+
+ # Check log files. Logs p1 and p3 should be clean, but we
+ # expect DB_RUNRECOVERY in log p2.
+ logcheck $testdir/env$tnum.log.p1
+ logcheckfails $testdir/env$tnum.log.p2 DB_RUNRECOVERY
+ logcheck $testdir/env$tnum.log.p3
+ }
+
+ puts "\tEnv$tnum.f: Empty slot shouldn't cause automatic recovery."
+
+ # Create 2 empty slots in the registry by letting two processes
+ # run to completion.
+ puts "\t\tEnv$tnum.f1: Start process 1."
+ set p1 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p1 \
+ $testdir $testfile PUT $key $data RECOVER 0 1 &]
+
+ puts "\t\tEnv$tnum.f2: Start process 2."
+ set p2 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p2 \
+ $testdir $testfile GET $key $data 0 0 1 &]
+
+ watch_procs $p1 1 60
+ watch_procs $p2 1 60
+
+ logcheck $testdir/env$tnum.log.p1
+ logcheck $testdir/env$tnum.log.p2
+
+ # Start two more processes. Neither should signal a need for recovery.
+ puts "\t\tEnv$tnum.f3: Start process 3."
+ set p3 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p3 \
+ $testdir $testfile GET $key $data RECOVER 0 10 &]
+
+ tclsleep 2
+
+ puts "\t\tEnv$tnum.f4: Start process 4."
+ set p4 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p4 \
+ $testdir $testfile PUT $key $data 0 0 10 &]
+
+ watch_procs $p3 1 120
+ watch_procs $p4 1 120
+
+ # Check log files: neither process should have returned DB_RUNRECOVERY.
+ logcheck $testdir/env$tnum.log.p3
+ logcheck $testdir/env$tnum.log.p4
+
+ puts "\tEnv$tnum.g: One process with -register & -failchk & no isalive"
+
+ # Use -failchk only; the test will fail because an isalive function is needed.
+ puts "\t\tEnv$tnum.g1: Start process 1."
+ env_cleanup $testdir
+
+ set p1 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p1 \
+ $testdir $testfile PUT $key $data RECOVER FAILCHK0 10 &]
+
+ watch_procs $p1 1 60
+
+ # Check log files for failures. Expect to see a failure.
+ logcheckfails $testdir/env$tnum.log.p1 DB_FAILCHK
+
+ puts "\tEnv$tnum.h: Second process joins with -register and -failchk."
+ env_cleanup $testdir
+
+ # use -failchk and -isalive flags
+ puts "\t\tEnv$tnum.h1: Start process 1."
+ set p1 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p1 \
+ $testdir $testfile PUT $key $data RECOVER FAILCHK1 10 &]
+
+ # Wait a while so process 1 has a chance to get going.
+ tclsleep 2
+
+ puts "\t\tEnv$tnum.h2: Start process 2."
+ set p2 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p2 \
+ $testdir $testfile GET $key $data 0 FAILCHK1 0 &]
+
+ watch_procs $p1 1 120
+ watch_procs $p2 1 120
+
+ # Check log files for failures.
+ logcheck $testdir/env$tnum.log.p1
+ logcheck $testdir/env$tnum.log.p2
+
+ puts "\tEnv$tnum.i: Second process can join with -register\
+ -recover -failchk after first process is killed."
+ env_cleanup $testdir
+
+ puts "\t\tEnv$tnum.i1: Start process 1."
+ set pids {}
+ set p1 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p1 \
+ $testdir $testfile PUT $key $data RECOVER FAILCHK1 10 &]
+ lappend pids $p1
+ tclsleep 2
+
+ puts "\t\tEnv$tnum.i2: Kill process 1."
+ set pids [findprocessids $testdir $pids]
+ foreach pid $pids {
+ tclkill $pid
+ }
+
+ puts "\t\tEnv$tnum.i3: Start process 2."
+ set p2 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p2 \
+ $testdir $testfile GET $key $data RECOVER FAILCHK1 0 &]
+
+ watch_procs $p2 1 120
+
+ # Check log files for failures.
+ logcheck $testdir/env$tnum.log.p1
+ logcheck $testdir/env$tnum.log.p2
+
+ if { $is_windows_test == 1 } {
+ puts "Skipping sections .j on Windows platform."
+ } else {
+ puts "\tEnv$tnum.j: Running registered process detects failure and recovers."
+ env_cleanup $testdir
+
+ puts "\t\tEnv$tnum.j1: Start process 1."
+ set pids {}
+ set p1 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p1 \
+ $testdir $testfile LOOP $key $data RECOVER FAILCHK1 5 &]
+ lappend pids $p1
+ tclsleep 2
+
+ # Identify child process to kill later.
+ set pids [findprocessids $testdir $pids]
+
+ puts "\t\tEnv$tnum.j2: Start process 2."
+ set p2 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p2 \
+ $testdir $testfile LOOP $key $data 0 0 10 &]
+
+ puts "\t\tEnv$tnum.j3: Kill process 1."
+ foreach pid $pids {
+ tclkill $pid
+ }
+
+ # Identify child process 2; do this after process 1 has died.
+ lappend pids $p2
+
+ # Identify child process to kill later.
+ set pids [findprocessids $testdir $pids]
+
+ puts "\t\tEnv$tnum.j4: Start process 3."
+ set p3 [exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+ $testdir/env$tnum.log.p3 \
+ $testdir $testfile GET $key $data RECOVER FAILCHK1 0 &]
+ # Sleep for approximately 20 seconds -- process 2 should still be going.
+ tclsleep 20
+
+ puts "\t\tEnv$tnum.j5: Kill process 2."
+ foreach pid $pids {
+ tclkill $pid
+ }
+
+ watch_procs $p3 1 30
+
+ # Check log files. Logs p1 and p2 should be clean, but we
+ # expect failchk messages in p3 from cleanup
+ logcheckfails $testdir/env$tnum.log.p3 Freeing
+ logcheck $testdir/env$tnum.log.p2
+ logcheck $testdir/env$tnum.log.p1
+ }
+}
+
+# Check log file and report failures with FAIL. Use this when
+# we don't expect failures.
+proc logcheck { logname } {
+ set errstrings [eval findfail $logname]
+ foreach errstring $errstrings {
+ puts "FAIL: error in $logname : $errstring"
+ }
+}
+
+# When we expect a failure, verify we find the one we expect.
+proc logcheckfails { logname message } {
+ set f [open $logname r]
+ while { [gets $f line] >= 0 } {
+ if { [is_substr $line $message] == 1 } {
+ close $f
+ return 0
+ }
+ }
+ close $f
+ puts "FAIL: Did not find expected error $message."
+}
+
+# The script wrap.tcl creates a parent and a child process. We
+# can't see the child pids, so find them by their sentinel files.
+# This creates a list where the parent pid is always listed
+# before the child pid.
+proc findprocessids { testdir plist } {
+ set beginfiles [glob $testdir/begin.*]
+ foreach b $beginfiles {
+ regsub $testdir/begin. $b {} pid
+ if { [lsearch -exact $plist $pid] == -1 } {
+ lappend plist $pid
+ }
+ }
+ return $plist
+}
+
diff --git a/db-4.8.30/test/env013.tcl b/db-4.8.30/test/env013.tcl
new file mode 100644
index 0000000..2c47f34
--- /dev/null
+++ b/db-4.8.30/test/env013.tcl
@@ -0,0 +1,84 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env013
+# TEST Test of basic functionality of fileid_reset.
+# TEST
+# TEST Create a database in an env. Copy it to a new file within
+# TEST the same env. Reset the file id and make sure it has changed.
+proc env013 { } {
+ source ./include.tcl
+ global util_path
+
+ puts "Env013: Test fileid_reset."
+
+ set testfile A.db
+ set dupfile B.db
+ set nentries 500
+ set filenames "A B C D E"
+
+ foreach lorder { 1234 4321 } {
+ puts "\tEnv013.a: Creating env."
+ env_cleanup $testdir
+ set env [berkdb_env -create -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # Open database A, populate and close.
+ puts "\tEnv013.b: Creating database with lorder $lorder."
+ foreach filename $filenames {
+ # Pass -lorder so each iteration of the byte-order loop takes effect.
+ set db [eval {berkdb_open \
+ -pagesize 8192 -lorder $lorder -env $env -auto_commit \
+ -btree -create -mode 0644 $testfile $filename} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ for { set i 0 } { $i < $nentries } { incr i } {
+ set key KEY.$i
+ set data DATA.$i
+ error_check_good\
+ db_put [$db put -txn $t $key $data] 0
+ }
+ error_check_good txn_commit [$t commit] 0
+ error_check_good db_close [$db close] 0
+ }
+
+ # Copy database file A to database file B for fileid testing.
+ puts "\tEnv013.c: Copy database."
+ file copy -force $testdir/$testfile $testdir/$dupfile
+
+ # Reset B's fileid and confirm the ID has changed.
+ puts "\tEnv013.d: Resetting file id for copied database."
+ error_check_good fileid_reset [$env id_reset $dupfile] 0
+ set orig_id [getfileid $testdir/$testfile]
+ puts "\tEnv013.d: orig: $orig_id"
+ set new_id [getfileid $testdir/$dupfile]
+ puts "\tEnv013.d: new: $new_id"
+ error_check_bad id_changed $orig_id $new_id
+
+ # Verify and open B.
+ puts "\tEnv013.e: Verify and open database copy."
+ error_check_good verify [verify_dir $testdir "\tEnv013.e: "] 0
+ set db [eval {berkdb_open} \
+ -env $env -auto_commit -btree -mode 0644 -rdonly $dupfile]
+ error_check_good dup_open [is_valid_db $db] TRUE
+
+ # Clean up.
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ }
+}
+
+# Get the file id number, identified as "uid" in the db_dump output.
+proc getfileid { db } {
+ global util_path
+
+ set ret [exec $util_path/db_dump -da $db]
+ set uidstart [string first "uid:" $ret]
+ set uidend [string first "\tminkey:" $ret]
+ set uid [string range $ret $uidstart $uidend]
+ set uid [string trimright $uid]
+ return $uid
+}
diff --git a/db-4.8.30/test/env014.tcl b/db-4.8.30/test/env014.tcl
new file mode 100644
index 0000000..9fbdc07
--- /dev/null
+++ b/db-4.8.30/test/env014.tcl
@@ -0,0 +1,117 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env014
+# TEST
+# TEST Make sure that attempts to open an environment with
+# TEST incompatible flags (e.g. replication without transactions)
+# TEST fail with the appropriate messages.
+# TEST
+# TEST A new thread of control joining an env automatically
+# TEST initializes the same subsystems as the original env.
+# TEST Make sure that the attempt to change subsystems when
+# TEST joining an env fails with the appropriate messages.
+
+proc env014 { } {
+ source ./include.tcl
+
+ set tnum "014"
+ puts "Env$tnum: Environment subsystem initialization and env joins."
+ env_cleanup $testdir
+
+ # Open an env with -recover but not -create; should fail.
+ puts "\tEnv$tnum.a: Open env with -recover but not -create."
+ catch {set env [berkdb_env_noerr -recover -txn -home $testdir]} ret
+ error_check_good recover_wo_create \
+ [is_substr $ret "requires the create flag"] 1
+
+ # Open an env with -recover but not -txn; should fail.
+ puts "\tEnv$tnum.b: Open env with -recover but not -txn."
+ catch {set env [berkdb_env_noerr -create -recover -home $testdir]} ret
+ error_check_good recover_wo_txn \
+ [is_substr $ret "requires transaction support"] 1
+
+ # Open an env with -replication but not -lock; should fail.
+ puts "\tEnv$tnum.c: Open env with -rep but not -lock."
+ catch {set env\
+ [berkdb_env_noerr -create -rep_master -home $testdir]} ret
+ error_check_good rep_wo_lock \
+ [is_substr $ret "requires locking support"] 1
+
+ # Open an env with -replication but not -txn; should fail.
+ puts "\tEnv$tnum.d: Open env with -rep but not -txn."
+ catch {set env\
+ [berkdb_env_noerr -create -rep_master -lock -home $testdir]} ret
+ error_check_good rep_wo_txn \
+ [is_substr $ret "requires transaction support"] 1
+
+ # Skip remainder of test for HP-UX; HP-UX does not allow
+ # opening a second handle on an environment.
+ if { $is_hp_test == 1 } {
+ puts "Skipping remainder of env$tnum for HP-UX."
+ return
+ }
+
+ # Join -txn env with -cdb; should fail.
+ puts "\tEnv$tnum.e: Join -txn env with -cdb."
+ set env [berkdb_env_noerr -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ catch {set env2 [berkdb_env_noerr -home $testdir -cdb]} ret
+ error_check_good txn+cdb [is_substr $ret "incompatible"] 1
+ error_check_good env_close [$env close] 0
+ error_check_good env_remove [berkdb envremove -force -home $testdir] 0
+
+ # Join -cdb env with -txn; should fail.
+ puts "\tEnv$tnum.f: Join -cdb env with -txn."
+ set env [berkdb_env_noerr -create -home $testdir -cdb]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ catch {set env2 [berkdb_env_noerr -home $testdir -txn]} ret
+ error_check_good cdb+txn [is_substr $ret "incompatible"] 1
+ error_check_good env_close [$env close] 0
+ error_check_good env_remove [berkdb envremove -force -home $testdir] 0
+
+ # Open an env with -txn. Join the env, and start a txn.
+ puts "\tEnv$tnum.g: Join -txn env, and start a txn."
+ set env [berkdb_env_noerr -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+ set env2 [berkdb_env_noerr -home $testdir]
+ error_check_good env2_open [is_valid_env $env2] TRUE
+
+ set txn [$env2 txn]
+ error_check_good env2_txn [is_valid_txn $txn $env2] TRUE
+ error_check_good txn_commit [$txn commit] 0
+
+ error_check_good env2_close [$env2 close] 0
+ error_check_good env_close [$env close] 0
+ error_check_good env_remove [berkdb envremove -force -home $testdir] 0
+
+ # Join -txn env with -lock; should succeed and use txns.
+ puts "\tEnv$tnum.h: Join -txn env with -lock, and start a txn."
+ set env [berkdb_env_noerr -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+ set env2 [berkdb_env_noerr -home $testdir -lock]
+ error_check_good env2_open [is_valid_env $env2] TRUE
+
+ set txn [$env2 txn]
+ error_check_good env2_txn [is_valid_txn $txn $env2] TRUE
+ error_check_good txn_commit [$txn commit] 0
+
+ error_check_good env2_close [$env2 close] 0
+ error_check_good env_close [$env close] 0
+ error_check_good env_remove [berkdb envremove -force -home $testdir] 0
+
+ # Join plain vanilla env with -txn; should fail.
+ puts "\tEnv$tnum.i: Join plain vanilla env with -txn."
+ set env [berkdb_env_noerr -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+ catch {set env2 [berkdb_env_noerr -home $testdir -txn]} ret
+ error_check_good ds+txn [is_substr $ret "incompatible"] 1
+
+ error_check_good env_close [$env close] 0
+ error_check_good env_remove [berkdb envremove -force -home $testdir] 0
+}
diff --git a/db-4.8.30/test/env015.tcl b/db-4.8.30/test/env015.tcl
new file mode 100644
index 0000000..82fa1a2
--- /dev/null
+++ b/db-4.8.30/test/env015.tcl
@@ -0,0 +1,85 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2006-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env015
+# TEST Rename the underlying directory of an env, make sure everything
+# TEST still works. Test runs with regular named databases and with
+# TEST in-memory named databases.
+proc env015 { } {
+ source ./include.tcl
+
+ env_cleanup $testdir
+ set newdir NEWDIR
+
+ puts "Env015: Test of renaming env directories."
+
+ foreach dbtype { inmem ondisk } {
+ puts "\tEnv015.a: Create env."
+ set env [berkdb_env -create -mode 0644 -home $testdir]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tEnv015.b: Create $dbtype db."
+ if { $dbtype == "inmem" } {
+ set testfile { "" file1.db }
+ } else {
+ set testfile file1.db
+ }
+ set db [eval {berkdb_open} -create -env $env -btree $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ for { set i 0 } { $i < 10 } { incr i } {
+ error_check_good db_put [$db put $i $i] 0
+ }
+
+ # When the database is on disk, we have a file handle open
+ # during the attempt to rename the directory. As far as we
+ # can tell, Windows doesn't allow this (that is, Windows
+ # doesn't allow directories to be renamed when there is an
+ # open handle inside them). For QNX, tclsh can not rename a
+ # directory correctly while there are shared memory files in
+ # that directory.
+ puts "\tEnv015.b: Rename directory."
+ if { $is_windows_test || $is_qnx_test } {
+ file mkdir $newdir
+ eval file rename -force [glob $testdir/*] $newdir
+ fileremove -force $testdir
+ } else {
+ file rename -force $testdir $newdir
+ }
+
+ puts "\tEnv015.c: Database is still available in new directory."
+ for { set i 0 } { $i < 10 } { incr i } {
+ set ret [$db get $i]
+ error_check_good db_get [lindex [lindex $ret 0] 1] $i
+ }
+
+ puts "\tEnv015.d: Can't open database in old directory."
+ catch {set db2 [eval \
+ {berkdb_open} -env $env -btree $testdir/$testfile]} db2
+ error_check_bad open_fails [is_valid_db $db2] TRUE
+
+ puts \
+ "\tEnv015.e: Recreate directory with original name and use it."
+ file mkdir $testdir
+ set newenv [berkdb_env -create -mode 0644 -home $testdir]
+ error_check_good newenv [is_valid_env $newenv] TRUE
+
+ set newdb [berkdb_open -create -env $newenv -btree foo.db]
+ error_check_good newdb_open [is_valid_db $newdb] TRUE
+
+ # There should not be any data in the new db.
+ for { set i 0 } { $i < 10 } { incr i } {
+ set ret [$newdb get $i]
+ error_check_good db_get [llength $ret] 0
+ }
+
+ # Clean up.
+ error_check_good db_close [$db close] 0
+ error_check_good newdb_close [$newdb close] 0
+ error_check_good envclose [$env close] 0
+ error_check_good newenvclose [$newenv close] 0
+ fileremove -f $newdir
+ }
+}
diff --git a/db-4.8.30/test/env016.tcl b/db-4.8.30/test/env016.tcl
new file mode 100644
index 0000000..5eda93d
--- /dev/null
+++ b/db-4.8.30/test/env016.tcl
@@ -0,0 +1,135 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env016
+# TEST Replication settings and DB_CONFIG
+# TEST
+# TEST Create a DB_CONFIG for various replication settings. Use
+# TEST rep_stat or getter functions to verify they're set correctly.
+#
+proc env016 { } {
+ source ./include.tcl
+
+ puts "Env016: Replication DB_CONFIG settings."
+
+ #
+ # Test options that we query via rep_stat.
+ # Structure of the list is:
+ # 0. Arg used in DB_CONFIG.
+ # 1. Value assigned in DB_CONFIG.
+ # 2. Message output during test.
+ # 3. String to search for in stat output.
+ #
+ set slist {
+ { "rep_set_priority" "1" "Env016.a0: Priority"
+ "Environment priority" }
+ }
+ puts "\tEnv016.a: Check settings via rep_stat."
+ foreach l $slist {
+ set carg [lindex $l 0]
+ set val [lindex $l 1]
+ set msg [lindex $l 2]
+ set str [lindex $l 3]
+ env_cleanup $testdir
+ replsetup $testdir/MSGQUEUEDIR
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ repladd 1
+
+ # Open a master.
+ puts "\t\t$msg"
+ #
+ # Create DB_CONFIG.
+ #
+ env016_make_config $masterdir $carg $val
+ #
+ # Open env.
+ #
+ set ma_envcmd "berkdb_env_noerr -create -txn nosync \
+ -home $masterdir -errpfx MASTER -rep_master \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd]
+ #
+ # Verify value
+ #
+ set gval [stat_field $masterenv rep_stat $str]
+ error_check_good stat_get $gval $val
+
+ error_check_good masterenv_close [$masterenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+ }
+
+ # Test options that we query via getter functions.
+ # Structure of the list is:
+ # 0. Arg used in DB_CONFIG.
+ # 1. Value assigned in DB_CONFIG.
+ # 2. Message output during test.
+ # 3. Getter command.
+ # 4. Getter results expected if different from #1 value.
+ set glist {
+ { "rep_set_clockskew" "102 100" "Env016.b0: Rep clockskew"
+ "rep_get_clockskew" }
+ { "rep_set_config" "db_rep_conf_bulk" "Env016.b1: Rep config"
+ "rep_get_config bulk" "1" }
+ { "rep_set_limit" "0 1048576" "Env016.b2: Rep limit"
+ "rep_get_limit" }
+ { "rep_set_nsites" "6" "Env016.b3: Rep nsites"
+ "rep_get_nsites" }
+ { "rep_set_request" "4000 128000" "Env016.b4: Rep request"
+ "rep_get_request" }
+ { "rep_set_timeout" "db_rep_election_timeout 500000"
+ "Env016.b5: Rep elect timeout" "rep_get_timeout election"
+ "500000" }
+ { "rep_set_timeout" "db_rep_checkpoint_delay 500000"
+ "Env016.b6: Rep ckp timeout"
+ "rep_get_timeout checkpoint_delay" "500000" }
+ }
+ puts "\tEnv016.b: Check settings via getter functions."
+ foreach l $glist {
+ set carg [lindex $l 0]
+ set val [lindex $l 1]
+ set msg [lindex $l 2]
+ set getter [lindex $l 3]
+ if { [llength $l] > 4 } {
+ set getval [lindex $l 4]
+ } else {
+ set getval $val
+ }
+ env_cleanup $testdir
+ replsetup $testdir/MSGQUEUEDIR
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ repladd 1
+
+ # Open a master.
+ puts "\t\t$msg"
+ #
+ # Create DB_CONFIG.
+ #
+ env016_make_config $masterdir $carg $val
+ #
+ # Open env.
+ #
+ set ma_envcmd "berkdb_env_noerr -create -txn nosync \
+ -home $masterdir -errpfx MASTER -rep_master \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd]
+ #
+ # Verify value
+ #
+ set gval [eval $masterenv $getter]
+ error_check_good stat_get $gval $getval
+
+ error_check_good masterenv_close [$masterenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+ }
+}
+
+proc env016_make_config { dir carg cval } {
+ set cid [open $dir/DB_CONFIG w]
+ puts $cid "$carg $cval"
+ close $cid
+}
diff --git a/db-4.8.30/test/env017.tcl b/db-4.8.30/test/env017.tcl
new file mode 100644
index 0000000..d399b75
--- /dev/null
+++ b/db-4.8.30/test/env017.tcl
@@ -0,0 +1,582 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c)-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env017
+# TEST Check documented "stat" fields against the fields
+# TEST returned by the "stat" functions. Make sure they
+# TEST match, and that none are missing.
+# TEST These are the stat functions we test:
+# TEST env log_stat
+# TEST env lock_stat
+# TEST env txn_stat
+# TEST env mutex_stat
+# TEST env rep_stat
+# TEST env repmgr_stat
+# TEST env mpool_stat
+# TEST db_stat
+# TEST seq_stat
+
+
+proc env017 { } {
+ puts "\nEnv017: Check the integrity of the various stat"
+ env017_log_stat
+ env017_lock_stat
+ env017_txn_stat
+ env017_mutex_stat
+ env017_rep_stat
+ env017_repmgr_stat
+ env017_mpool_stat
+ env017_db_stat
+ env017_seq_stat
+}
+
+# Check the log stat field.
+proc env017_log_stat { } {
+ puts "\nEnv017: Check the Log stat field"
+ set check_type log_stat_check
+ set stat_method log_stat
+ set envargs {-create -log}
+ set map_list {
+ { "Magic" st_magic }
+ { "Log file Version" st_version }
+ { "Region size" st_regsize }
+ { "Log file mode" st_mode }
+ { "Log record cache size" st_lg_bsize }
+ { "Current log file size" st_lg_size }
+ { "Log file records written" st_record }
+ { "Mbytes written" st_w_mbytes }
+ { "Bytes written (over Mb)" st_w_bytes }
+ { "Mbytes written since checkpoint" st_wc_mbytes }
+ { "Bytes written (over Mb) since checkpoint"
+ st_wc_bytes }
+ { "Times log written" st_wcount }
+ { "Times log written because cache filled up"
+ st_wcount_fill }
+ { "Times log read from disk" st_rcount }
+ { "Times log flushed to disk" st_scount }
+ { "Current log file number" st_cur_file }
+ { "Current log file offset" st_cur_offset }
+ { "On-disk log file number" st_disk_file }
+ { "On-disk log file offset" st_disk_offset }
+ { "Max commits in a log flush" st_maxcommitperflush }
+ { "Min commits in a log flush" st_mincommitperflush }
+ { "Number of region lock waits" st_region_wait }
+ { "Number of region lock nowaits" st_region_nowait }
+ }
+ set doc_list [list st_magic st_version st_mode st_lg_bsize st_lg_size \
+ st_record st_w_mbytes st_w_bytes st_wc_mbytes st_wc_bytes \
+ st_wcount st_wcount_fill st_rcount st_scount st_cur_file \
+ st_cur_offset st_disk_file st_disk_offset st_maxcommitperflush \
+ st_mincommitperflush st_regsize st_region_wait st_region_nowait ]
+ env017_stat_check \
+ $map_list $doc_list $check_type $stat_method $envargs
+}
+
+# Check the lock stat field.
+proc env017_lock_stat { } {
+ puts "\nEnv017: Check the lock stat field"
+ set check_type lock_stat_check
+ set stat_method lock_stat
+ set envargs {-create -lock}
+ set map_list {
+ { "Region size" st_regsize }
+ { "Last allocated locker ID" st_id }
+ { "Current maximum unused locker ID" st_cur_maxid }
+ { "Maximum locks" st_maxlocks }
+ { "Maximum lockers" st_maxlockers }
+ { "Maximum objects" st_maxobjects }
+ { "Lock modes" st_nmodes }
+ { "Number of lock table partitions" st_partitions }
+ { "Current number of locks" st_nlocks }
+ { "Maximum number of locks so far" st_maxnlocks }
+ { "Maximum number of locks in any hash bucket"
+ st_maxhlocks }
+ { "Maximum number of lock steals for an empty partition"
+ st_locksteals }
+ { "Maximum number lock steals in any partition"
+ st_maxlsteals }
+ { "Current number of lockers" st_nlockers }
+ { "Maximum number of lockers so far" st_maxnlockers }
+ { "Current number of objects" st_nobjects }
+ { "Maximum number of objects so far" st_maxnobjects }
+ { "Maximum number of objects in any hash bucket"
+ st_maxhobjects }
+ { "Maximum number of object steals for an empty partition"
+ st_objectsteals }
+ { "Maximum number object steals in any partition"
+ st_maxosteals }
+ { "Lock requests" st_nrequests }
+ { "Lock releases" st_nreleases }
+ { "Lock upgrades" st_nupgrade }
+ { "Lock downgrades" st_ndowngrade }
+ { "Number of conflicted locks for which we waited"
+ st_lock_wait }
+ { "Number of conflicted locks for which we did not wait"
+ st_lock_nowait }
+ { "Deadlocks detected" st_ndeadlocks }
+ { "Number of region lock waits" st_region_wait }
+ { "Number of region lock nowaits" st_region_nowait }
+ { "Number of object allocation waits" st_objs_wait }
+ { "Number of object allocation nowaits" st_objs_nowait }
+ { "Number of locker allocation waits" st_lockers_wait }
+ { "Number of locker allocation nowaits" st_lockers_nowait }
+ { "Maximum hash bucket length" st_hash_len }
+ { "Lock timeout value" st_locktimeout }
+ { "Number of lock timeouts" st_nlocktimeouts }
+ { "Transaction timeout value" st_txntimeout }
+ { "Number of transaction timeouts" st_ntxntimeouts }
+ { "Number lock partition mutex waits" st_part_wait }
+ { "Number lock partition mutex nowaits" st_part_nowait }
+ { "Maximum number waits on any lock partition mutex"
+ st_part_max_wait }
+ { "Maximum number nowaits on any lock partition mutex"
+ st_part_max_nowait }
+ }
+ set doc_list [list st_id st_cur_maxid st_nmodes st_maxlocks \
+ st_maxlockers st_maxobjects st_partitions st_nlocks st_maxnlocks \
+ st_maxhlocks st_locksteals st_maxlsteals st_nlockers \
+ st_maxnlockers st_nobjects st_maxnobjects st_maxhobjects \
+ st_objectsteals st_maxosteals st_nrequests st_nreleases st_nupgrade\
+ st_ndowngrade st_lock_wait st_lock_nowait st_ndeadlocks \
+ st_locktimeout st_nlocktimeouts st_txntimeout st_ntxntimeouts \
+ st_objs_wait st_objs_nowait st_lockers_wait st_lockers_nowait \
+ st_hash_len st_regsize st_part_wait st_part_nowait st_part_max_wait\
+ st_part_max_nowait st_region_wait st_region_nowait]
+ env017_stat_check \
+ $map_list $doc_list $check_type $stat_method $envargs
+}
+
+# Check the txn stat field.
+proc env017_txn_stat { } {
+ puts "\nEnv017: Check the transaction stat field"
+ set check_type txn_stat_check
+ set stat_method txn_stat
+ set envargs {-create -txn}
+ set map_list {
+ { "Region size" st_regsize }
+ { "LSN of last checkpoint" st_last_ckp }
+ { "Time of last checkpoint" st_time_ckp }
+ { "Last txn ID allocated" st_last_txnid }
+ { "Maximum txns" st_maxtxns }
+ { "Number aborted txns" st_naborts }
+ { "Number txns begun" st_nbegins }
+ { "Number committed txns" st_ncommits }
+ { "Number active txns" st_nactive }
+ { "Number of snapshot txns" st_nsnapshot }
+ { "Number restored txns" st_nrestores }
+ { "Maximum active txns" st_maxnactive }
+ { "Maximum snapshot txns" st_maxnsnapshot }
+ { "Number of region lock waits" st_region_wait }
+ { "Number of region lock nowaits" st_region_nowait }
+ }
+ set doc_list [list st_last_ckp st_time_ckp st_last_txnid st_maxtxns \
+ st_nactive st_nsnapshot st_maxnactive st_maxnsnapshot st_nbegins \
+ st_naborts st_ncommits st_nrestores st_regsize st_region_wait \
+ st_region_nowait ]
+ env017_stat_check \
+ $map_list $doc_list $check_type $stat_method $envargs
+}
+
+# Check the mutex stat field.
+proc env017_mutex_stat { } {
+ puts "\nEnv017: Check the mutex stat field"
+ set check_type mutex_stat_check
+ set stat_method mutex_stat
+ set envargs {-create}
+ set map_list {
+ { "Mutex align" st_mutex_align }
+ { "Mutex TAS spins" st_mutex_tas_spins }
+ { "Mutex count" st_mutex_cnt }
+ { "Free mutexes" st_mutex_free }
+ { "Mutexes in use" st_mutex_inuse }
+ { "Max in use" st_mutex_inuse_max }
+ { "Mutex region size" st_regsize }
+ { "Number of region waits" st_region_wait }
+ { "Number of region no waits" st_region_nowait }
+ }
+ set doc_list [list st_mutex_align st_mutex_tas_spins st_mutex_cnt \
+ st_mutex_free st_mutex_inuse st_mutex_inuse_max st_regsize \
+ st_region_wait st_region_nowait ]
+
+ env017_stat_check \
+ $map_list $doc_list $check_type $stat_method $envargs
+}
+
+# Check the rep stat field.
+proc env017_rep_stat { } {
+ puts "\nEnv017: Check the replication stat field"
+ set check_type rep_stat_check
+ set stat_method rep_stat
+ set envargs {-create -rep -log -txn}
+ set map_list {
+ { "Role" st_status}
+ { "Next LSN expected" st_next_lsn }
+ { "First missed LSN" st_waiting_lsn }
+ { "Maximum permanent LSN" st_max_perm_lsn }
+ { "Bulk buffer fills" st_bulk_fills }
+ { "Bulk buffer overflows" st_bulk_overflows }
+ { "Bulk records stored" st_bulk_records }
+ { "Bulk buffer transfers" st_bulk_transfers }
+ { "Client service requests" st_client_svc_req }
+ { "Client service req misses" st_client_svc_miss }
+ { "Client rerequests" st_client_rerequests }
+ { "Duplicate master conditions" st_dupmasters }
+ { "Environment ID" st_env_id }
+ { "Environment priority" st_env_priority }
+ { "Generation number" st_gen }
+ { "Election generation number" st_egen }
+ { "Startup complete" st_startup_complete }
+ { "Duplicate log records received" st_log_duplicated }
+ { "Current log records queued" st_log_queued }
+ { "Maximum log records queued" st_log_queued_max }
+ { "Total log records queued" st_log_queued_total }
+ { "Log records received" st_log_records }
+ { "Log records requested" st_log_requested }
+ { "Master environment ID" st_master }
+ { "Master changes" st_master_changes }
+ { "Messages with bad generation number" st_msgs_badgen }
+ { "Messages processed" st_msgs_processed }
+ { "Messages ignored for recovery" st_msgs_recover }
+ { "Message send failures" st_msgs_send_failures }
+ { "Messages sent" st_msgs_sent }
+ { "New site messages" st_newsites }
+ { "Number of sites in replication group" st_nsites }
+ { "Transmission limited" st_nthrottles }
+ { "Outdated conditions" st_outdated }
+ { "Transactions applied" st_txns_applied }
+ { "Next page expected" st_next_pg }
+ { "First missed page" st_waiting_pg }
+ { "Duplicate pages received" st_pg_duplicated }
+ { "Pages received" st_pg_records }
+ { "Pages requested" st_pg_requested }
+ { "Elections held" st_elections }
+ { "Elections won" st_elections_won }
+ { "Election phase" st_election_status }
+ { "Election winner" st_election_cur_winner }
+ { "Election generation number" st_election_gen }
+ { "Election max LSN" st_election_lsn }
+ { "Election sites" st_election_nsites }
+ { "Election nvotes" st_election_nvotes }
+ { "Election priority" st_election_priority }
+ { "Election tiebreaker" st_election_tiebreaker }
+ { "Election votes" st_election_votes }
+ { "Election seconds" st_election_sec }
+ { "Election usecs" st_election_usec }
+ { "Start-sync operations delayed"
+ st_startsync_delayed }
+ { "Maximum lease seconds" st_max_lease_sec }
+ { "Maximum lease usecs" st_max_lease_usec }
+ { "File fail cleanups done" st_filefail_cleanups }
+ }
+ set doc_list [list st_bulk_fills st_bulk_overflows st_bulk_records \
+ st_bulk_transfers st_client_rerequests st_client_svc_miss \
+ st_client_svc_req st_dupmasters st_egen st_election_cur_winner \
+ st_election_gen st_election_lsn st_election_nsites \
+ st_election_nvotes st_election_priority st_election_sec \
+ st_election_status st_election_tiebreaker st_election_usec \
+ st_election_votes st_elections st_elections_won st_env_id \
+ st_env_priority st_filefail_cleanups st_gen st_log_duplicated \
+ st_log_queued st_log_queued_max st_log_queued_total st_log_records \
+ st_log_requested st_master st_master_changes st_max_lease_sec \
+ st_max_lease_usec st_max_perm_lsn st_msgs_badgen st_msgs_processed\
+ st_msgs_recover st_msgs_send_failures st_msgs_sent st_newsites \
+ st_next_lsn st_next_pg st_nsites st_nthrottles st_outdated \
+ st_pg_duplicated st_pg_records st_pg_requested \
+ st_startsync_delayed st_startup_complete st_status st_txns_applied\
+ st_waiting_lsn st_waiting_pg ]
+ env017_stat_check \
+ $map_list $doc_list $check_type $stat_method $envargs
+}
+
+# Check the repmgr stat field.
+proc env017_repmgr_stat { } {
+ puts "\nEnv017: Check the repmgr stat field"
+ set check_type repmgr_stat_check
+ set stat_method repmgr_stat
+ set envargs {-create -txn -rep}
+ set map_list {
+ { "Acknowledgement failures" st_perm_failed }
+ { "Messages delayed" st_msgs_queued}
+ { "Messages discarded" st_msgs_dropped}
+ { "Connections dropped" st_connection_drop}
+ { "Failed re-connects" st_connect_fail}
+ }
+ set doc_list [list st_perm_failed st_msgs_queued st_msgs_dropped \
+ st_connection_drop st_connect_fail ]
+ env017_stat_check \
+ $map_list $doc_list $check_type $stat_method $envargs
+}
+
+# Check the mpool stat field.
+proc env017_mpool_stat { } {
+ puts "\nEnv017: Check the mpool stat field"
+ set check_type mpool_stat_check
+ set stat_method mpool_stat
+ set envargs {-create}
+ set map_list {
+ { "Cache size (gbytes)" st_gbytes }
+ { "Cache size (bytes)" st_bytes }
+ { "Number of caches" st_ncache }
+ { "Maximum number of caches" st_max_ncache }
+ { "Region size" st_regsize }
+ { "Maximum memory-mapped file size" st_mmapsize }
+ { "Maximum open file descriptors" st_maxopenfd }
+ { "Maximum sequential buffer writes" st_maxwrite }
+ { "Sleep after writing maximum buffers" st_maxwrite_sleep }
+ { "Pages mapped into address space" st_map }
+ { "Cache hits" st_cache_hit }
+ { "Cache misses" st_cache_miss }
+ { "Pages created" st_page_create }
+ { "Pages read in" st_page_in }
+ { "Pages written" st_page_out }
+ { "Clean page evictions" st_ro_evict }
+ { "Dirty page evictions" st_rw_evict }
+ { "Dirty pages trickled" st_page_trickle }
+ { "Cached pages" st_pages }
+ { "Cached clean pages" st_page_clean }
+ { "Cached dirty pages" st_page_dirty }
+ { "Hash buckets" st_hash_buckets }
+ { "Default pagesize" st_pagesize }
+ { "Hash lookups" st_hash_searches }
+ { "Longest hash chain found" st_hash_longest }
+ { "Hash elements examined" st_hash_examined }
+ { "Number of hash bucket nowaits" st_hash_nowait }
+ { "Number of hash bucket waits" st_hash_wait }
+ { "Maximum number of hash bucket nowaits"
+ st_hash_max_nowait }
+ { "Maximum number of hash bucket waits" st_hash_max_wait }
+ { "Number of region lock nowaits" st_region_nowait }
+ { "Number of region lock waits" st_region_wait }
+ { "Buffers frozen" st_mvcc_frozen }
+ { "Buffers thawed" st_mvcc_thawed }
+ { "Frozen buffers freed" st_mvcc_freed }
+ { "Page allocations" st_alloc }
+ { "Buckets examined during allocation" st_alloc_buckets }
+ { "Maximum buckets examined during allocation"
+ st_alloc_max_buckets }
+ { "Pages examined during allocation" st_alloc_pages }
+ { "Maximum pages examined during allocation"
+ st_alloc_max_pages }
+ { "Threads waiting on buffer I/O" st_io_wait}
+ { "Number of syncs interrupted" st_sync_interrupted}
+ }
+ set doc_list [list st_gbytes st_bytes st_ncache st_max_ncache \
+ st_regsize st_mmapsize st_maxopenfd st_maxwrite st_maxwrite_sleep \
+ st_map st_cache_hit st_cache_miss st_page_create st_page_in \
+ st_page_out st_ro_evict st_rw_evict st_page_trickle st_pages \
+ st_page_clean st_page_dirty st_hash_buckets st_pagesize \
+ st_hash_searches \
+ st_hash_longest st_hash_examined st_hash_nowait st_hash_wait \
+ st_hash_max_nowait st_hash_max_wait st_region_wait \
+ st_region_nowait st_mvcc_frozen st_mvcc_thawed st_mvcc_freed \
+ st_alloc st_alloc_buckets st_alloc_max_buckets st_alloc_pages \
+ st_alloc_max_pages st_io_wait st_sync_interrupted ]
+ env017_stat_check \
+ $map_list $doc_list $check_type $stat_method $envargs
+}
+
+# Check the db stat field.
+proc env017_db_stat { } {
+ puts "\nEnv017: Check the db stat field"
+ set hash_map_list {
+ { "Magic" hash_magic }
+ { "Version" hash_version }
+ { "Page size" hash_pagesize }
+ { "Page count" hash_pagecnt }
+ { "Number of keys" hash_nkeys }
+ { "Number of records" hash_ndata }
+ { "Fill factor" hash_ffactor }
+ { "Buckets" hash_buckets }
+ { "Free pages" hash_free }
+ { "Bytes free" hash_bfree }
+ { "Number of big pages" hash_bigpages }
+ { "Big pages bytes free" hash_big_bfree }
+ { "Overflow pages" hash_overflows }
+ { "Overflow bytes free" hash_ovfl_free }
+ { "Duplicate pages" hash_dup }
+ { "Duplicate pages bytes free" hash_dup_free }
+ { "Flags" flags }
+ }
+ set queue_map_list {
+ { "Magic" qs_magic }
+ { "Version" qs_version }
+ { "Page size" qs_pagesize }
+ { "Extent size" qs_extentsize }
+ { "Number of keys" qs_nkeys }
+ { "Number of records" qs_ndata }
+ { "Record length" qs_re_len }
+ { "Record pad" qs_re_pad }
+ { "First record number" qs_first_recno }
+ { "Last record number" qs_cur_recno }
+ { "Number of pages" qs_pages }
+ { "Bytes free" qs_pgfree}
+ { "Flags" flags }
+ }
+ set btree_map_list {
+ { "Magic" bt_magic }
+ { "Version" bt_version }
+ { "Number of keys" bt_nkeys }
+ { "Number of records" bt_ndata }
+ { "Minimum keys per page" bt_minkey }
+ { "Fixed record length" bt_re_len }
+ { "Record pad" bt_re_pad }
+ { "Page size" bt_pagesize }
+ { "Page count" bt_pagecnt }
+ { "Levels" bt_levels }
+ { "Internal pages" bt_int_pg }
+ { "Leaf pages" bt_leaf_pg }
+ { "Duplicate pages" bt_dup_pg }
+ { "Overflow pages" bt_over_pg }
+ { "Empty pages" bt_empty_pg }
+ { "Pages on freelist" bt_free }
+ { "Internal pages bytes free" bt_int_pgfree }
+ { "Leaf pages bytes free" bt_leaf_pgfree }
+ { "Duplicate pages bytes free" bt_dup_pgfree }
+ { "Bytes free in overflow pages" bt_over_pgfree }
+ { "Flags" flags }
+ }
+ set hash_doc_list [list hash_magic hash_version hash_nkeys hash_ndata \
+ hash_pagecnt hash_pagesize hash_ffactor hash_buckets hash_free \
+ hash_bfree hash_bigpages hash_big_bfree hash_overflows \
+ hash_ovfl_free hash_dup hash_dup_free flags]
+
+ set btree_doc_list [list bt_magic bt_version bt_nkeys bt_ndata \
+ bt_pagecnt bt_pagesize bt_minkey bt_re_len bt_re_pad bt_levels \
+ bt_int_pg bt_leaf_pg bt_dup_pg bt_over_pg bt_empty_pg bt_free \
+ bt_int_pgfree bt_leaf_pgfree bt_dup_pgfree bt_over_pgfree flags ]
+
+ set queue_doc_list [list qs_magic qs_version qs_nkeys qs_ndata \
+ qs_pagesize qs_extentsize qs_pages qs_re_len qs_re_pad qs_pgfree \
+ qs_first_recno qs_cur_recno flags ]
+
+ # Check the hash db stat field.
+ puts "\tEnv017: Check the hash db stat"
+ env017_dbstat_check \
+ $hash_map_list $hash_doc_list {hash_db_stat_check} {-create -hash}
+
+ # Check the queue db stat field.
+ puts "\tEnv017: Check the queue db stat"
+ env017_dbstat_check \
+ $queue_map_list $queue_doc_list {queue_db_stat_check} \
+ {-create -queue}
+
+ # Check the btree/recno db stat field.
+ puts "\tEnv017: Check the btree/recno db stat"
+ env017_dbstat_check \
+ $btree_map_list $btree_doc_list {btree_db_stat_check} \
+ {-create -btree}
+}
+
+
+# Check the sequence stat field.
+proc env017_seq_stat { } {
+ puts "\nEnv017: Check the sequence stat field"
+ source ./include.tcl
+ env_cleanup $testdir
+ set file1 db1.db
+ set db1 [berkdb open -create -btree $testdir/$file1]
+ error_check_good is_valid_db [is_valid_db $db1] TRUE
+ set seq [berkdb sequence -create -min 0 -max 1024768 $db1 seq_key1]
+ error_check_good is_valid_seq [is_valid_seq $seq] TRUE
+ set stat_list [$seq stat]
+ set map_list {
+ { "Wait" st_wait }
+ { "No wait" st_nowait }
+ { "Current" st_current }
+ { "Cached" st_value }
+ { "Max Cached" st_last_value }
+ { "Min" st_min }
+ { "Max" st_max }
+ { "Cache size" st_cache_size}
+ { "Flags" st_flags}
+ }
+ set doc_list [list st_wait st_nowait st_current st_value \
+ st_last_value st_min st_max st_cache_size st_flags]
+ env017_do_check $map_list $stat_list $doc_list {seq_stat}
+ error_check_good "$seq close" [$seq close] 0
+ error_check_good "$db1 close" [$db1 close] 0
+}
+
+# This is a common proc for the stat methods called by an env handle.
+proc env017_stat_check { map_list doc_list check_type stat_method \
+ {envargs {}} } {
+ source ./include.tcl
+ env_cleanup $testdir
+ set env [eval berkdb_env_noerr $envargs -home $testdir]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+ set stat_list [$env $stat_method]
+ env017_do_check $map_list $stat_list $doc_list $check_type
+ error_check_good "$env close" [$env close] 0
+}
+
+# This is a common proc for db stat.
+proc env017_dbstat_check { map_list doc_list check_type {dbargs {}} } {
+ source ./include.tcl
+ env_cleanup $testdir
+ set filename "db1.db"
+ set db [eval berkdb_open_noerr $dbargs $testdir/$filename]
+ error_check_good is_valid_db [is_valid_db $db] TRUE
+ set stat_list [$db stat]
+ env017_do_check $map_list $stat_list $doc_list $check_type
+ error_check_good "$db close" [$db close] 0
+}
+
+# This proc does the actual checking job.
+proc env017_do_check { map_list stat_list doc_list check_type } {
+ # Check if all the items in the stat_list have the corresponding
+ # item in doc_list.
+ foreach l $map_list {
+ set field_map([lindex $l 0]) [lindex $l 1]
+ }
+ puts "\tEnv017: Check from stat_list"
+ set res_stat_list {}
+ foreach item $stat_list {
+ puts "\t\tEnv017: Checking item [lindex $item 0]"
+ if {![info exists field_map([lindex $item 0])]} {
+ lappend res_stat_list [lindex $item 0]
+ continue
+ }
+ set cur_field $field_map([lindex $item 0])
+ if {[lsearch -exact $doc_list $cur_field] == -1} {
+ lappend res_stat_list [lindex $item 0]
+ }
+ }
+ if {[llength $res_stat_list]>0} {
+ puts -nonewline "FAIL: in stat_list of $check_type, "
+ puts "Mismatch Items: $res_stat_list"
+ }
+
+ # Check if all the items in the doc_list have the corresponding
+ # record in the stat_list.
+ foreach l $map_list {
+ set field_map([lindex $l 1]) [lindex $l 0]
+ }
+
+ set stat_field_list {}
+
+ foreach item $stat_list {
+ lappend stat_field_list [lindex $item 0]
+ }
+
+ set res_doc_list {}
+ puts "\tEnv017: Check from doc_list"
+ foreach item $doc_list {
+ puts "\t\tEnv017: Checking item [lindex $item 0]"
+ if {![info exists field_map([lindex $item 0])]} {
+ lappend res_doc_list [lindex $item 0]
+ continue
+ }
+ set cur_field $field_map([lindex $item 0])
+ if {[lsearch -exact $stat_field_list $cur_field] == -1} {
+ lappend res_doc_list [lindex $item 0]
+ }
+ }
+ if {[llength $res_doc_list]>0} {
+ puts -nonewline "FAIL: in doc_list of $check_type, "
+ puts "Mismatch Items: $res_doc_list"
+ }
+}
+
diff --git a/db-4.8.30/test/env018.tcl b/db-4.8.30/test/env018.tcl
new file mode 100644
index 0000000..082e8c2
--- /dev/null
+++ b/db-4.8.30/test/env018.tcl
@@ -0,0 +1,57 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c)-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST env018
+# TEST Test getters when joining an env. When a second handle is
+# TEST opened on an existing env, get_open_flags needs to return
+# TEST the correct flags to the second handle so it knows what sort
+# TEST of environment it's just joined.
+# TEST
+# TEST For several different flags to env_open, open an env. Open
+# TEST a second handle on the same env, get_open_flags and verify
+# TEST the flag is returned.
+proc env018 { } {
+ source ./include.tcl
+ set tnum "018"
+
+ puts "Env$tnum: Test of join_env and getters."
+
+ # Skip for HP-UX where a second handle on an env is not allowed.
+ if { $is_hp_test == 1 } {
+ puts "Skipping env$tnum for HP-UX."
+ return
+ }
+
+ # Set up flags to use in opening envs.
+ set flags { -cdb -lock -log -txn }
+
+ foreach flag $flags {
+ env_cleanup $testdir
+
+ puts "\t\tEnv$tnum.a: Open env with $flag."
+ set e1 [eval {berkdb_env} -create -home $testdir $flag]
+ error_check_good e1_open [is_valid_env $e1] TRUE
+
+ puts "\t\tEnv$tnum.b: Join the env."
+ set e2 [eval {berkdb_env} -home $testdir]
+ error_check_good e2_open [is_valid_env $e2] TRUE
+
+ # Get open flags for both envs.
+ set e1_flags_returned [$e1 get_open_flags]
+ set e2_flags_returned [$e2 get_open_flags]
+
+ # Test that the flag given to the original env is
+ # returned by a call to the second env.
+ puts "\t\tEnv$tnum.c: Check that flag is returned."
+ error_check_good flag_is_returned \
+ [is_substr $e2_flags_returned $flag] 1
+
+ # Clean up.
+ error_check_good e1_close [$e1 close] 0
+ error_check_good e2_close [$e2 close] 0
+ }
+}
+
diff --git a/db-4.8.30/test/envscript.tcl b/db-4.8.30/test/envscript.tcl
new file mode 100644
index 0000000..10f33a7
--- /dev/null
+++ b/db-4.8.30/test/envscript.tcl
@@ -0,0 +1,100 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Envscript -- for use with env012, DB_REGISTER test.
+# Usage: envscript testdir testfile putget key data recover failchk wait
+# testdir: directory containing the env we are joining.
+# testfile: file name for database.
+# putget: What to do in the db: put, get, or loop.
+# key: key to store or get
+# data: data to store or get
+# recover: include or omit the -recover flag in opening the env.
+# failchk: include or omit the -failchk flag in opening the env. There are
+# two options: FAILCHK0 uses just -failchk, and FAILCHK1 uses both
+# -failchk and -isalive.
+# wait: how many seconds to wait before closing env at end of test.
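+#
+# For example, env012 launches this script through wrap.tcl roughly as
+# follows (the key and data values shown are illustrative):
+#	exec $tclsh_path $test_path/wrap.tcl envscript.tcl \
+#	    $testdir/env012.log.p1 $testdir TESTFILE PUT KEY DATA1 RECOVER 0 10 &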
+
+source ./include.tcl
+source $test_path/testutils.tcl
+
+set usage "envscript testdir testfile putget key data recover failchk wait"
+
+# Verify usage
+if { $argc != 8 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set testdir [ lindex $argv 0 ]
+set testfile [ lindex $argv 1 ]
+set putget [lindex $argv 2 ]
+set key [ lindex $argv 3 ]
+set data [ lindex $argv 4 ]
+set recover [ lindex $argv 5 ]
+set failchk [lindex $argv 6 ]
+set wait [ lindex $argv 7 ]
+
+set flag1 {}
+if { $recover == "RECOVER" } {
+ set flag1 " -recover "
+}
+
+set flag2 {}
+if {$failchk == "FAILCHK0" } {
+ set flag2 " -failchk "
+}
+if {$failchk == "FAILCHK1"} {
+ set flag2 " -failchk -isalive my_isalive -reg_timeout 100 "
+}
+
+# Open and register environment.
+if {[catch {eval {berkdb_env} \
+ -create -home $testdir -txn -register $flag1 $flag2} dbenv]} {
+ puts "FAIL: opening env returned $dbenv"
+}
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+# Open database, put or get, close database.
+if {[catch {eval {berkdb_open} \
+ -create -auto_commit -btree -env $dbenv $testfile} db]} {
+ puts "FAIL: opening db returned $db"
+}
+error_check_good dbopen [is_valid_db $db] TRUE
+
+switch $putget {
+ PUT {
+ set txn [$dbenv txn]
+ error_check_good db_put [eval {$db put} -txn $txn $key $data] 0
+ error_check_good txn_commit [$txn commit] 0
+ }
+ GET {
+ set ret [$db get $key]
+ error_check_good db_get [lindex [lindex $ret 0] 1] $data
+ }
+ LOOP {
+ while { 1 } {
+ set txn [$dbenv txn]
+ error_check_good db_put \
+ [eval {$db put} -txn $txn $key $data] 0
+ error_check_good txn_commit [$txn commit] 0
+ tclsleep 1
+ }
+ }
+ default {
+ puts "FAIL: Unrecognized putget value $putget"
+ }
+}
+
+error_check_good db_close [$db close] 0
+
+# Wait.
+while { $wait > 0 } {
+puts "waiting ... wait is $wait"
+ tclsleep 1
+ incr wait -1
+}
+
+error_check_good env_close [$dbenv close] 0
diff --git a/db-4.8.30/test/fop001.tcl b/db-4.8.30/test/fop001.tcl
new file mode 100644
index 0000000..2fc099d
--- /dev/null
+++ b/db-4.8.30/test/fop001.tcl
@@ -0,0 +1,243 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST fop001.tcl
+# TEST Test file system operations, combined in a transaction. [#7363]
+proc fop001 { method { inmem 0 } args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # The variable inmem determines whether the test is being
+ # run with regular named databases or named in-memory databases.
+ if { $inmem == 0 } {
+ set tnum "001"
+ set string "regular named databases"
+ set operator do_op
+ } else {
+ set tnum "007"
+ set string "in-memory named databases"
+ set operator do_inmem_op
+ }
+
+ puts "\nFop$tnum: ($method)\
+ Two file system ops in one transaction for $string."
+
+ set exists {a b}
+ set noexist {foo bar}
+ set open {}
+ set cases {}
+ set ops {rename remove open open_create open_excl truncate}
+
+ # Set up all sensible two-op cases (op1 succeeds).
+ foreach retval { 0 "file exists" "no such file" } {
+ foreach op1 {rename remove open open_excl \
+ open_create truncate} {
+ foreach op2 $ops {
+ append cases " " [create_tests $op1 $op2 \
+ $exists $noexist $open $retval]
+ }
+ }
+ }
+
+ # Set up evil two-op cases (op1 fails). Omit open_create
+ # and truncate from op1 list -- open_create always succeeds
+ # and truncate requires a successful open.
+ foreach retval { 0 "file exists" "no such file" } {
+ foreach op1 { rename remove open open_excl } {
+ foreach op2 $ops {
+ append cases " " [create_badtests $op1 $op2 \
+ $exists $noexist $open $retval]
+ }
+ }
+ }
+
+ # The structure of each case is:
+ # {{op1 {names1} result end1} {op2 {names2} result}}
+ # A result of "0" indicates no error is expected.
+ # Otherwise, the result is the expected error message.
+ #
+ # The "end1" variable indicates whether the first txn
+ # ended with an abort or a commit, and is not used
+ # in this test.
+ #
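+ # As a purely illustrative example (the real cases are generated by
+ # create_tests and create_badtests in foputils.tcl), a single case
+ # might look something like:
+ #	{{rename {a foo} 0 commit} {open {foo} 0}}
+ # i.e. rename database a to foo, then open foo, with both operations
+ # expected to succeed.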
+ # Comment this loop out to remove the list of cases.
+# set i 1
+# foreach case $cases {
+# puts "\tFop$tnum:$i: $case"
+# incr i
+# }
+
+ set testid 0
+
+ # Run all the cases
+ foreach case $cases {
+ env_cleanup $testdir
+ incr testid
+
+ # Extract elements of the case
+ set op1 [lindex [lindex $case 0] 0]
+ set names1 [lindex [lindex $case 0] 1]
+ set res1 [lindex [lindex $case 0] 2]
+
+ set op2 [lindex [lindex $case 1] 0]
+ set names2 [lindex [lindex $case 1] 1]
+ set res2 [lindex [lindex $case 1] 2]
+
+ puts "\tFop$tnum.$testid: $op1 ($names1), then $op2 ($names2)."
+
+ # The variable 'when' describes when to resolve a txn --
+ # before or after closing any open databases.
+ foreach when { before after } {
+
+ # Create transactional environment.
+ set env [berkdb_env -create -home $testdir -txn]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+
+ # Create two databases, dba and dbb.
+ if { $inmem == 0 } {
+ set dba [eval {berkdb_open -create} $omethod \
+ $args -env $env -auto_commit a]
+ } else {
+ set dba [eval {berkdb_open -create} $omethod \
+ $args -env $env -auto_commit { "" a }]
+ }
+ error_check_good dba_open [is_valid_db $dba] TRUE
+ error_check_good dba_put [$dba put 1 a] 0
+ error_check_good dba_close [$dba close] 0
+
+ if { $inmem == 0 } {
+ set dbb [eval {berkdb_open -create} $omethod \
+ $args -env $env -auto_commit b]
+ } else {
+ set dbb [eval {berkdb_open -create} $omethod \
+ $args -env $env -auto_commit { "" b }]
+ }
+ error_check_good dbb_open [is_valid_db $dbb] TRUE
+ error_check_good dbb_put [$dbb put 1 b] 0
+ error_check_good dbb_close [$dbb close] 0
+
+ # The variable 'end' describes how to resolve the txn.
+ # We run the 'abort' first because that leaves the env
+ # properly set up for the 'commit' test.
+ foreach end {abort commit} {
+
+ puts "\t\tFop$tnum.$testid:\
+ $end $when closing database."
+
+ # Start transaction
+ set txn [$env txn]
+
+ # Execute and check operation 1
+ set result1 [$operator \
+ $omethod $op1 $names1 $txn $env $args]
+ if { $res1 == 0 } {
+ error_check_good \
+ op1_should_succeed $result1 $res1
+ } else {
+ set error [extract_error $result1]
+ error_check_good \
+ op1_wrong_failure $error $res1
+ }
+
+ # Execute and check operation 2
+ set result2 [$operator \
+ $omethod $op2 $names2 $txn $env $args]
+ if { $res2 == 0 } {
+ error_check_good \
+ op2_should_succeed $result2 $res2
+ } else {
+ set error [extract_error $result2]
+ error_check_good \
+ op2_wrong_failure $error $res2
+ }
+
+ if { $when == "before" } {
+ error_check_good txn_$end [$txn $end] 0
+
+ # If the txn was aborted, we still
+ # have the original two databases.
+ if { $end == "abort" } {
+ database_exists \
+ $inmem $testdir a
+ database_exists \
+ $inmem $testdir b
+ }
+ close_db_handles
+ } else {
+ close_db_handles
+ error_check_good txn_$end [$txn $end] 0
+
+ if { $end == "abort" } {
+ database_exists \
+ $inmem $testdir a
+ database_exists \
+ $inmem $testdir b
+ }
+ }
+ }
+
+ # Clean up for next case
+ error_check_good env_close [$env close] 0
+ error_check_good envremove \
+ [berkdb envremove -home $testdir] 0
+ env_cleanup $testdir
+ }
+ }
+}
+
+proc database_exists { inmem testdir name } {
+ if { $inmem == 1 } {
+ error_check_good db_exists [inmem_exists $testdir $name] 1
+ } else {
+ error_check_good db_exists [file exists $testdir/$name] 1
+ }
+}
+
+# This is a real hack. We need to figure out if an in-memory named
+# file exists. In a perfect world we could use mpool stat. Unfortunately,
+# mpool_stat returns files that have deadfile set and we need to not consider
+# those files to be meaningful. So, we are parsing the output of db_stat -MA
+# (I told you this was a hack.) If we ever change the output, this is going
+# to break big time. Here is what we assume:
+# A file is represented by: File #N name
+# The last field printed for a file is Flags
+# If the file is dead, deadfile will show up in the flags
+proc inmem_exists { dir filename } {
+ source ./include.tcl
+ set infile 0
+ set islive 0
+ set name ""
+ set s [exec $util_path/db_stat -MA -h $dir]
+ foreach i $s {
+ if { $i == "File" } {
+ set infile 1
+ set islive 1
+ set name ""
+ } elseif { $i == "Flags" } {
+ set infile 0
+ if { $name != "" && $islive } {
+ return 1
+ }
+ } elseif { $infile != 0 } {
+ incr infile
+ }
+
+ if { $islive && $i == "deadfile" } {
+ set islive 0
+ }
+
+ if { $infile == 3 } {
+ if { $i == $filename } {
+ set name $filename
+ }
+ }
+ }
+
+ return 0
+}
+
diff --git a/db-4.8.30/test/fop002.tcl b/db-4.8.30/test/fop002.tcl
new file mode 100644
index 0000000..9a1aa54
--- /dev/null
+++ b/db-4.8.30/test/fop002.tcl
@@ -0,0 +1,135 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST fop002.tcl
+# TEST Test file system operations in the presence of bad permissions.
+proc fop002 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ env_cleanup $testdir
+ puts "\nFop002: ($method) File system ops and permissions."
+ if { $is_windows_test == 1 } {
+ puts "\tSkipping permissions test for Windows platform."
+ return
+ }
+
+ # Create database with -rw-r--r-- permissions.
+ set perms "0644"
+ set testfile $testdir/a.db
+ set destfile $testdir/b.db
+
+ set db [eval \
+ {berkdb_open -create} $omethod $args -mode $perms $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_put [$db put 1 [chop_data $method a]] 0
+ error_check_good db_close [$db close] 0
+
+ # Eliminate all read and write permission, and try to execute
+ # file ops. They should fail.
+ set res [exec chmod 0000 $testfile]
+ error_check_good remove_permissions [llength $res] 0
+ # Put remove last on the list of ops since it should succeed
+ # at the end of the test, removing the test file.
+ set ops [list open_create open rename remove]
+ set rdonly 0
+
+ puts "\tFop002.a: Test with neither read nor write permission."
+ foreach op $ops {
+ puts "\t\tFop002.a: Testing $op for failure."
+ switch $op {
+ open {
+ test_$op $testfile $omethod $args $rdonly 1
+ }
+ rename {
+ test_$op $testfile $destfile 1
+ }
+ open_create {
+ test_$op $testfile $omethod $args 1
+ }
+ remove {
+ test_$op $testfile 1
+ }
+ }
+ }
+
+ # Change permissions to read-only.
+ puts "\tFop002.b: Test with read-only permission."
+ set rdonly 1
+
+ set res [exec chmod 0444 $testfile]
+ error_check_good set_readonly [llength $res] 0
+
+ foreach op $ops {
+ puts "\t\tFop002.b: Testing $op for success."
+ switch $op {
+ open {
+ test_$op $testfile $omethod $args $rdonly 0
+ }
+ rename {
+ test_$op $testfile $destfile 0
+ # Move it back so later tests work
+ test_$op $destfile $testfile 0
+ }
+ open_create {
+ puts "\t\tSkipping open_create with read-only."
+ }
+ remove {
+ test_$op $testfile 0
+ }
+ }
+ }
+}
+
+proc test_remove { testfile {expectfail 0} } {
+ catch { berkdb dbremove $testfile } res
+ if { $expectfail == 1 } {
+ error_check_good remove_err $res "db remove:permission denied"
+ } else {
+ error_check_good remove $res 0
+ }
+}
+
+proc test_rename { testfile destfile {expectfail 0} } {
+ catch { berkdb dbrename $testfile $destfile } res
+ if { $expectfail == 1 } {
+ error_check_good rename_err $res "db rename:permission denied"
+ } else {
+ error_check_good rename $res 0
+ }
+}
+
+proc test_open_create { testfile omethod args {expectfail 0} } {
+ set stat [catch { set db \
+ [eval {berkdb_open -create} $omethod $args $testfile]} res]
+ if { $expectfail == 1 } {
+ error_check_good open_create_err $res \
+ "db open:permission denied"
+ } else {
+ error_check_good open_create $stat 0
+ # Since we succeeded, we have to close the db.
+ error_check_good db_close [$db close] 0
+ }
+}
+
+proc test_open { testfile omethod args {readonly 0} {expectfail 0} } {
+ if { $readonly == 1 } {
+ set stat [catch {set db \
+ [eval {berkdb_open -rdonly} $omethod $args $testfile]} res]
+ } else {
+ set stat [catch {set db [berkdb_open $omethod $testfile]} res]
+ }
+ if { $expectfail == 1 } {
+ error_check_good open_err $res \
+ "db open:permission denied"
+ } else {
+ error_check_good db_open $stat 0
+ error_check_good db_close [$db close] 0
+ }
+}
+
diff --git a/db-4.8.30/test/fop003.tcl b/db-4.8.30/test/fop003.tcl
new file mode 100644
index 0000000..8785a3b
--- /dev/null
+++ b/db-4.8.30/test/fop003.tcl
@@ -0,0 +1,94 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST fop003
+# TEST
+# TEST Test behavior of create and truncate for compatibility
+# TEST with sendmail.
+# TEST 1. DB_TRUNCATE is not allowed with locking or transactions.
+# TEST 2. Can -create into zero-length existing file.
+# TEST 3. Can -create into non-zero-length existing file if and
+# TEST only if DB_TRUNCATE is specified.
+proc fop003 { method args } {
+ global errorInfo
+ source ./include.tcl
+ env_cleanup $testdir
+
+ if { [is_btree $method] != 1 } {
+ puts "Skipping fop003 for method $method"
+ return
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set tnum "003"
+ set testfile fop$tnum.db
+ puts "Fop$tnum ($method): Test of required behavior for sendmail."
+
+ puts "\tFop$tnum.a: -truncate is not allowed within\
+ txn or locking env."
+ set envflags "lock txn"
+ foreach flag $envflags {
+ set env [berkdb_env_noerr -create -home $testdir -$flag]
+ set db [eval {berkdb_open_noerr -create} \
+ $omethod $args -env $env $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+ catch {[berkdb_open_noerr -truncate $omethod $args -env $env \
+ $testfile]} res
+ error_check_good "$flag env not allowed" [is_substr $res \
+ "DB_TRUNCATE illegal with locking specified"] 1
+ error_check_good dbremove [$env dbremove $testfile] 0
+ error_check_good env_close [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ }
+
+ puts "\tFop$tnum.b: -create is allowed on open of existing\
+ zero-length file."
+ # Create an empty file, then open with -create. We get an
+ # error message warning us that this does not look like a
+ # DB file, but the open should succeed.
+ set fd [open $testdir/foo w]
+ close $fd
+ catch {set db [eval \
+ {berkdb_open_noerr -create} $omethod $args $testdir/foo]} res
+ error_check_good open_fail [is_substr $errorInfo \
+ "unexpected file type or format"] 1
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ puts "\tFop$tnum.c: -create is ignored on open of existing\
+ non-zero-length file."
+ # Create a db file. Close and reopen with -create. Make
+ # sure that we still have the same file by checking the contents.
+ set key 1
+ set data "data"
+ set file "file.db"
+ set db [eval {berkdb_open -create $omethod} $args $testdir/$file]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_put [$db put $key [chop_data $method $data]] 0
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open -create $omethod} $args $testdir/$file]
+ error_check_good db_open2 [is_valid_db $db] TRUE
+ set ret [$db get $key]
+ error_check_good db_get \
+ [lindex [lindex $ret 0] 1] [pad_data $method $data]
+ error_check_good db_close2 [$db close] 0
+
+ puts "\tFop$tnum.d: -create is allowed on open -truncate of\
+ existing non-zero-length file."
+ # Use the file we already have with -truncate flag. The open
+ # should be successful, and when we query for the key that
+ # used to be there, we should get nothing.
+ set db [eval \
+ {berkdb_open -create -truncate $omethod} $args $testdir/$file]
+ error_check_good db_open3 [is_valid_db $db] TRUE
+ set ret [$db get $key]
+ error_check_good db_get [lindex [lindex $ret 0] 1] ""
+ error_check_good db_close3 [$db close] 0
+
+}
diff --git a/db-4.8.30/test/fop004.tcl b/db-4.8.30/test/fop004.tcl
new file mode 100644
index 0000000..14ce8b3
--- /dev/null
+++ b/db-4.8.30/test/fop004.tcl
@@ -0,0 +1,260 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST fop004
+# TEST Test of DB->rename(). (formerly test075)
+# TEST Test that files can be renamed from one directory to another.
+# TEST Test that files can be renamed using absolute or relative
+# TEST pathnames.
+proc fop004 { method { tnum "004" } args } {
+ global encrypt
+ global errorCode
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Fop$tnum: ($method $args): Test of DB->rename()"
+
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ # If we are using an env, then skip this test.
+ # It needs its own.
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Skipping fop$tnum for env $env"
+ return
+ }
+ if { $encrypt != 0 } {
+ puts "Skipping fop$tnum for security"
+ return
+ }
+ cleanup $testdir NULL
+
+ # Define absolute pathnames
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+ set reldir $testdir
+
+ # Name subdirectories for renaming from one directory to another.
+ set subdira A
+ set subdirb B
+
+ # Set up absolute and relative pathnames for test
+ set paths [list "absolute $fulldir" "relative $reldir"]
+ set files [list "fop$tnum-old.db fop$tnum-new.db {name change}" \
+ "fop$tnum.db fop$tnum.db {directory change}"]
+
+ foreach pathinfo $paths {
+ set pathtype [lindex $pathinfo 0]
+ set path [lindex $pathinfo 1]
+ foreach fileinfo $files {
+ set desc [lindex $fileinfo 2]
+ puts "Fop$tnum: Test of $pathtype path $path with $desc"
+ set env NULL
+ set envargs ""
+
+ # Loop through test using the following rename options
+ # 1. no environment, not in transaction
+ # 2. with environment, not in transaction
+ # 3. rename with auto-commit
+ # 4. rename in committed transaction
+ # 5. rename in aborted transaction
+
+ foreach op "noenv env auto commit abort" {
+
+ puts "\tFop$tnum.a: Create/rename with $op"
+ # If we are using an env, then testfile should
+ # be the db name. Otherwise it is the path we
+ # are testing and the name.
+ #
+ set old [lindex $fileinfo 0]
+ set new [lindex $fileinfo 1]
+ # Set up subdirectories if necessary.
+ if { $desc == "directory change" } {
+ file mkdir $testdir/$subdira
+ file mkdir $testdir/$subdirb
+ set oldname $subdira/$old
+ set newname $subdirb/$new
+ set oldextent $subdira/__dbq.$old.0
+ set newextent $subdirb/__dbq.$new.0
+ } else {
+ set oldname $old
+ set newname $new
+ set oldextent __dbq.$old.0
+ set newextent __dbq.$new.0
+ }
+ # If we don't have an env, we're going to
+ # operate on the file using its absolute
+ # or relative path. Tack it on the front.
+ if { $op == "noenv" } {
+ set oldfile $path/$oldname
+ set newfile $path/$newname
+ set oldextent $path/$oldextent
+ set newextent $path/$newextent
+ } else {
+ set oldfile $oldname
+ set newfile $newname
+ set txnarg ""
+ if { $op == "auto" || $op == "commit" \
+ || $op == "abort" } {
+ set txnarg " -txn"
+ }
+ set env [eval {berkdb_env -create} \
+ $txnarg -home $path]
+ set envargs "-env $env"
+ error_check_good \
+ env_open [is_valid_env $env] TRUE
+ }
+
+ # Files don't exist before starting the test.
+ #
+ check_file_exist $oldfile $env $path 0
+ check_file_exist $newfile $env $path 0
+
+ puts "\t\tFop$tnum.a.1: Create file $oldfile"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $envargs $args $oldfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Use numeric key so record-based methods
+ # don't need special treatment.
+ set key 1
+ set data data
+
+ error_check_good dbput \
+ [$db put $key [chop_data $method $data]] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tFop$tnum.a.2:\
+ Rename file to $newfile"
+ check_file_exist $oldfile $env $path 1
+ check_file_exist $newfile $env $path 0
+
+				# Regular renames use berkdb dbrename;
+				# txn-protected renames use $env dbrename.
+ if { $op == "noenv" || $op == "env" } {
+ error_check_good rename [eval \
+ {berkdb dbrename} $envargs \
+ $oldfile $newfile] 0
+ } elseif { $op == "auto" } {
+ error_check_good rename [eval \
+ {$env dbrename} -auto_commit \
+ $oldfile $newfile] 0
+ } else {
+ # $op is "abort" or "commit"
+ set txn [$env txn]
+ error_check_good rename [eval \
+ {$env dbrename} -txn $txn \
+ $oldfile $newfile] 0
+ error_check_good txn_$op [$txn $op] 0
+ }
+
+ if { $op != "abort" } {
+ check_file_exist $oldfile $env $path 0
+ check_file_exist $newfile $env $path 1
+ } else {
+ check_file_exist $oldfile $env $path 1
+ check_file_exist $newfile $env $path 0
+ }
+
+ # Check that extent files moved too, unless
+ # we aborted the rename.
+ if { [is_queueext $method ] == 1 } {
+ if { $op != "abort" } {
+ check_file_exist \
+ $oldextent $env $path 0
+ check_file_exist \
+ $newextent $env $path 1
+ } else {
+ check_file_exist \
+ $oldextent $env $path 1
+ check_file_exist \
+ $newextent $env $path 0
+ }
+ }
+
+ puts "\t\tFop$tnum.a.3: Check file contents"
+ # Open again with create to make sure we're not
+ # caching. In the normal case (no env), we
+ # already know the file doesn't exist.
+ set odb [eval {berkdb_open -create -mode 0644} \
+ $envargs $omethod $args $oldfile]
+ set ndb [eval {berkdb_open -create -mode 0644} \
+ $envargs $omethod $args $newfile]
+ error_check_good \
+ odb_open [is_valid_db $odb] TRUE
+ error_check_good \
+ ndb_open [is_valid_db $ndb] TRUE
+
+				# The get from the "old" database should come
+				# back empty and the "new" one should return
+				# the data, unless the rename was aborted (in
+				# which case the "old" database still has it).
+ set odbt [$odb get $key]
+ if { $op == "abort" } {
+ error_check_good \
+ odbt_has_data [llength $odbt] 1
+ } else {
+ set ndbt [$ndb get $key]
+ error_check_good \
+ odbt_empty [llength $odbt] 0
+ error_check_bad \
+ ndbt_empty [llength $ndbt] 0
+ error_check_good ndbt \
+ [lindex [lindex $ndbt 0] 1] \
+ [pad_data $method $data]
+ }
+ error_check_good odb_close [$odb close] 0
+ error_check_good ndb_close [$ndb close] 0
+
+ # Now there's both an old and a new. Rename the
+ # "new" to the "old" and make sure that fails.
+ #
+ puts "\tFop$tnum.b: Make sure rename fails\
+ instead of overwriting"
+ set envargs ""
+ if { $env != "NULL" } {
+ error_check_good \
+ env_close [$env close] 0
+ set env [berkdb_env_noerr -home $path]
+ set envargs " -env $env"
+ error_check_good env_open2 \
+ [is_valid_env $env] TRUE
+ }
+ set ret [catch {eval {berkdb dbrename} \
+ $envargs $newfile $oldfile} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret \
+ [is_substr $errorCode EEXIST] 1
+
+ # Verify and then start over from a clean slate.
+ verify_dir $path "\tFop$tnum.c: "
+ verify_dir $path/$subdira "\tFop$tnum.c: "
+ verify_dir $path/$subdirb "\tFop$tnum.c: "
+ if { $env != "NULL" } {
+ error_check_good \
+ env_close2 [$env close] 0
+ }
+ env_cleanup $path
+ check_file_exist $oldfile $env $path 0
+ check_file_exist $newfile $env $path 0
+ }
+ }
+ }
+}
+
+proc check_file_exist { filename env path expected } {
+ if { $env != "NULL" } {
+ error_check_good "$filename exists in env" \
+ [file exists $path/$filename] $expected
+ } else {
+ error_check_good \
+ "$filename exists" [file exists $filename] $expected
+ }
+}
diff --git a/db-4.8.30/test/fop005.tcl b/db-4.8.30/test/fop005.tcl
new file mode 100644
index 0000000..ccb5eea
--- /dev/null
+++ b/db-4.8.30/test/fop005.tcl
@@ -0,0 +1,147 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST fop005
+# TEST Test of DB->remove()
+# TEST Formerly test080.
+# TEST Test use of dbremove with and without envs, with absolute
+# TEST and relative paths, and with subdirectories.
+
+proc fop005 { method args } {
+ source ./include.tcl
+
+ set tnum "005"
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Fop$tnum: ($method $args): Test of DB->remove()"
+
+ # Determine full path
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+ set reldir $testdir
+
+ # If we are using an env, then skip this test.
+ # It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Skipping fop$tnum for env $env"
+ return
+ }
+ cleanup $testdir NULL
+
+ # Set up absolute and relative pathnames, and a subdirectory.
+ set subdira A
+ set filename fop$tnum.db
+ set extentname __dbq.$filename.0
+ set paths [list $fulldir $reldir]
+ set files [list "$filename $extentname"\
+ "$subdira/$filename $subdira/$extentname"]
+
+ foreach path $paths {
+ foreach fileset $files {
+ set filename [lindex $fileset 0]
+ set extentname [lindex $fileset 1]
+
+ # Loop through test using the following options:
+ # 1. no environment, not in transaction
+ # 2. with environment, not in transaction
+ # 3. remove with auto-commit
+ # 4. remove in committed transaction
+ # 5. remove in aborted transaction
+
+ foreach op "noenv env auto commit abort" {
+ file mkdir $testdir/$subdira
+ if { $op == "noenv" } {
+ set file $path/$filename
+ set extentfile $path/$extentname
+ set env NULL
+ set envargs ""
+ } else {
+ set file $filename
+ set extentfile $extentname
+ set largs " -txn"
+ if { $op == "env" } {
+ set largs ""
+ }
+ set env [eval {berkdb_env -create \
+ -home $path} $largs]
+ set envargs " -env $env "
+ error_check_good \
+ env_open [is_valid_env $env] TRUE
+ }
+
+ puts "\tFop$tnum: dbremove with $op\
+ in path $path"
+ puts "\t\tFop$tnum.a.1: Create file $file"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $envargs $args {$file}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Use a numeric key so record-based methods
+ # don't need special treatment.
+ set key 1
+ set data [pad_data $method data]
+
+ error_check_good dbput \
+ [$db put $key [chop_data $method $data]] 0
+ error_check_good dbclose [$db close] 0
+ check_file_exist $file $env $path 1
+ if { [is_queueext $method] == 1 } {
+ check_file_exist \
+ $extentfile $env $path 1
+ }
+
+ # Use berkdb dbremove for non-txn tests
+ # and $env dbremove for transactional tests
+ puts "\t\tFop$tnum.a.2: Remove file"
+ if { $op == "noenv" || $op == "env" } {
+ error_check_good remove_$op \
+ [eval {berkdb dbremove} \
+ $envargs $file] 0
+ } elseif { $op == "auto" } {
+ error_check_good remove_$op \
+ [eval {$env dbremove} \
+ -auto_commit $file] 0
+ } else {
+ # $op is "abort" or "commit"
+ set txn [$env txn]
+ error_check_good remove_$op \
+ [eval {$env dbremove} \
+ -txn $txn $file] 0
+ error_check_good txn_$op [$txn $op] 0
+ }
+
+ puts "\t\tFop$tnum.a.3: Check that file is gone"
+ # File should now be gone, unless the op is an
+ # abort. Check extent files if necessary.
+ if { $op != "abort" } {
+ check_file_exist $file $env $path 0
+ if { [is_queueext $method] == 1 } {
+ check_file_exist \
+ $extentfile $env $path 0
+ }
+ } else {
+ check_file_exist $file $env $path 1
+ if { [is_queueext $method] == 1 } {
+ check_file_exist \
+ $extentfile $env $path 1
+ }
+ }
+
+ if { $env != "NULL" } {
+ error_check_good envclose [$env close] 0
+ }
+ env_cleanup $path
+ check_file_exist $file $env $path 0
+ }
+ }
+ }
+}
diff --git a/db-4.8.30/test/fop006.tcl b/db-4.8.30/test/fop006.tcl
new file mode 100644
index 0000000..465a60e
--- /dev/null
+++ b/db-4.8.30/test/fop006.tcl
@@ -0,0 +1,185 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST fop006
+# TEST Test file system operations in multiple simultaneous
+# TEST transactions. Start one transaction, do a file operation.
+# TEST Start a second transaction, do a file operation. Abort
+# TEST or commit txn1, then abort or commit txn2, and check for
+# TEST appropriate outcome.
+proc fop006 { method { inmem 0 } args } {
+ source ./include.tcl
+
+ # The variable inmem determines whether the test is being
+ # run on regular named databases or named in-memory databases.
+ if { $inmem == 0 } {
+ set tnum "006"
+ set string "regular named databases"
+ set operator do_op
+ } else {
+ set tnum "008"
+ set string "in-memory named databases"
+ set operator do_inmem_op
+ }
+
+ if { [is_btree $method] != 1 } {
+ puts "Skipping fop$tnum for method $method"
+ return
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ env_cleanup $testdir
+ puts "\nFop$tnum ($method): Two file system ops,\
+ each in its own transaction, for $string."
+
+ set exists {a b}
+ set noexist {foo bar}
+ set open {}
+ set cases {}
+ set ops {open open_create open_excl rename remove truncate}
+
+ # Set up cases where op1 is successful.
+ foreach retval { 0 "file exists" "no such file" } {
+ foreach end1 {abort commit} {
+ foreach op1 $ops {
+ foreach op2 $ops {
+ append cases " " [create_tests\
+ $op1 $op2 $exists $noexist\
+ $open $retval $end1]
+ }
+ }
+ }
+ }
+
+ # Set up evil two-op cases (op1 fails). Omit open_create
+ # and truncate from op1 list -- open_create always succeeds
+ # and truncate requires a successful open.
+ foreach retval { 0 "file exists" "no such file" } {
+ foreach op1 { rename remove open open_excl } {
+ foreach op2 $ops {
+ append cases " " [create_badtests $op1 $op2 \
+ $exists $noexist $open $retval $end1]
+ }
+ }
+ }
+
+ # The structure of each case is:
+ # {{op1 {args} result end} {op2 {args} result}}
+ # A result of "0" indicates no error is expected. Otherwise,
+ # the result is the expected error message. The value of "end"
+ # indicates whether the transaction will be aborted or committed.
+ #
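+	# For example, a hypothetical case of the form
+	#	{{rename {a foo} 0 commit} {open foo 0}}
+	# renames database a to foo and commits txn1, then expects a plain
+	# open of foo in txn2 to succeed.
+	#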
+	# Uncomment this loop to print the list of cases.
+# set i 1
+# foreach case $cases {
+# puts "\tFop$tnum.$i: $case"
+# incr i
+# }
+
+ # To run a particular case, add the case in this format and
+ # uncomment.
+# set cases {
+# {{open_excl {foo} 0 abort} {rename {b foo} 0}}
+# }
+
+ set testid 0
+
+ # Run all the cases
+ foreach case $cases {
+ incr testid
+
+ # Extract elements of the case
+ set op1 [lindex [lindex $case 0] 0]
+ set names1 [lindex [lindex $case 0] 1]
+ set res1 [lindex [lindex $case 0] 2]
+ set end1 [lindex [lindex $case 0] 3]
+
+ set op2 [lindex [lindex $case 1] 0]
+ set names2 [lindex [lindex $case 1] 1]
+ set res2 [lindex [lindex $case 1] 2]
+
+ puts "\tFop$tnum.$testid: $op1 ($names1) $res1 $end1;\
+ $op2 ($names2) $res2."
+
+ foreach end2 { abort commit } {
+ # Create transactional environment.
+ set env [berkdb_env -create -home $testdir -txn]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+
+ # Create databases
+ if { $inmem == 0 } {
+ set db [eval {berkdb_open -create} \
+ $omethod $args -env $env -auto_commit a]
+ } else {
+ set db [eval {berkdb_open -create} \
+ $omethod $args -env $env -auto_commit {""} a]
+ }
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_put \
+ [$db put 1 [chop_data $method a]] 0
+ error_check_good db_close [$db close] 0
+
+ if { $inmem == 0 } {
+ set db [eval {berkdb_open -create} \
+ $omethod $args -env $env -auto_commit b]
+ } else {
+ set db [eval {berkdb_open -create} \
+ $omethod $args -env $env -auto_commit {""} b]
+ }
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_put \
+ [$db put 1 [chop_data $method a]] 0
+ error_check_good db_close [$db close] 0
+
+ # Start transaction 1 and perform a file op.
+ set txn1 [$env txn]
+ error_check_good \
+ txn_begin [is_valid_txn $txn1 $env] TRUE
+ set result1 [$operator $omethod $op1 $names1 $txn1 $env $args]
+ if { $res1 == 0 } {
+ error_check_good \
+ op1_should_succeed $result1 $res1
+ } else {
+ set error [extract_error $result1]
+ error_check_good op1_wrong_failure $error $res1
+ }
+
+ # Start transaction 2 before ending transaction 1.
+ set pid [exec $tclsh_path $test_path/wrap.tcl \
+ fopscript.tcl $testdir/fop$tnum.log \
+ $operator $omethod $op2 $end2 $res2 $names2 &]
+
+ # Sleep a bit to give txn2 a chance to block.
+ tclsleep 2
+
+ # End transaction 1 and close any open db handles.
+ # Txn2 will now unblock and finish.
+ error_check_good txn1_$end1 [$txn1 $end1] 0
+ set handles [berkdb handles]
+ foreach handle $handles {
+ if {[string range $handle 0 1] == "db" } {
+ error_check_good \
+ db_close [$handle close] 0
+ }
+ }
+ watch_procs $pid 1 60 1
+
+ # Clean up for next case
+ error_check_good env_close [$env close] 0
+		catch { berkdb envremove -home $testdir } res
+
+ # Check for errors in log file.
+ set errstrings [eval findfail $testdir/fop$tnum.log]
+ foreach str $errstrings {
+ puts "FAIL: error message in log file: $str"
+ }
+ env_cleanup $testdir
+ }
+ }
+}
+
diff --git a/db-4.8.30/test/fop007.tcl b/db-4.8.30/test/fop007.tcl
new file mode 100644
index 0000000..a431819
--- /dev/null
+++ b/db-4.8.30/test/fop007.tcl
@@ -0,0 +1,21 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST fop007
+# TEST Test file system operations on named in-memory databases.
+# TEST Combine two ops in one transaction.
+proc fop007 { method args } {
+
+ # Queue extents are not allowed with in-memory databases.
+ if { [is_queueext $method] == 1 } {
+ puts "Skipping fop007 for method $method."
+ return
+ }
+ eval {fop001 $method 1} $args
+}
+
+
+
diff --git a/db-4.8.30/test/fop008.tcl b/db-4.8.30/test/fop008.tcl
new file mode 100644
index 0000000..e767f0c
--- /dev/null
+++ b/db-4.8.30/test/fop008.tcl
@@ -0,0 +1,15 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST fop008
+# TEST Test file system operations on named in-memory databases.
+# TEST Combine two ops in one transaction.
+proc fop008 { method args } {
+ eval {fop006 $method 1} $args
+}
+
+
+
diff --git a/db-4.8.30/test/fopscript.tcl b/db-4.8.30/test/fopscript.tcl
new file mode 100644
index 0000000..7c898b0
--- /dev/null
+++ b/db-4.8.30/test/fopscript.tcl
@@ -0,0 +1,70 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Fop006 script - test of fileops in multiple transactions
+# Usage: fopscript operator omethod op end result names args
+# operator: proc that performs the file op (do_op or do_inmem_op)
+# omethod: access method for database
+# op: file operation to perform
+# end: how to end the transaction (abort or commit)
+# result: expected result of the transaction
+# names: name(s) of files to operate on
+# args: additional args to do_op
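+#
+# A hypothetical invocation (normally launched through wrap.tcl by fop006
+# or fop008) would look like:
+#	fopscript.tcl do_op -btree rename abort 0 {b foo}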
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "fopscript operator omethod op end result names args"
+
+# Verify usage
+if { $argc < 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set operator [ lindex $argv 0 ]
+set omethod [ lindex $argv 1 ]
+set op [ lindex $argv 2 ]
+set end [ lindex $argv 3 ]
+set result [ lindex $argv 4 ]
+set names [ lindex $argv 5 ]
+set args [lindex [lrange $argv 6 end] 0]
+
+# Join the env
+set dbenv [eval berkdb_env -home $testdir]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+# Start transaction
+puts "\tFopscript.a: begin 2nd transaction (will block)"
+set txn2 [$dbenv txn]
+error_check_good txn2_begin [is_valid_txn $txn2 $dbenv] TRUE
+
+# Execute op2
+set op2result [$operator $omethod $op $names $txn2 $dbenv $args]
+
+# End txn2
+error_check_good txn2_end [$txn2 $end] 0
+if {$result == 0} {
+ error_check_good op2_should_succeed $op2result $result
+} else {
+ set error [extract_error $op2result]
+ error_check_good op2_wrong_failure $error $result
+}
+
+# Close any open db handles. We had to wait until now
+# because you can't close a database inside a transaction.
+set handles [berkdb handles]
+foreach handle $handles {
+ if {[string range $handle 0 1] == "db" } {
+ error_check_good db_close [$handle close] 0
+ }
+}
+
+# Close the env
+error_check_good dbenv_close [$dbenv close] 0
+puts "\tFopscript completed successfully"
+
diff --git a/db-4.8.30/test/foputils.tcl b/db-4.8.30/test/foputils.tcl
new file mode 100644
index 0000000..fc0d301
--- /dev/null
+++ b/db-4.8.30/test/foputils.tcl
@@ -0,0 +1,484 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+proc do_op {omethod op names txn env {largs ""}} {
+ switch -exact $op {
+ delete { do_delete $names }
+ rename { do_rename $names $txn $env }
+ remove { do_remove $names $txn $env }
+ noop { do_noop }
+ open_create { do_create $omethod $names $txn $env $largs }
+ open { do_open $omethod $names $txn $env $largs }
+ open_excl { do_create_excl $omethod $names $txn $env $largs }
+ truncate { do_truncate $omethod $names $txn $env $largs }
+ default { puts "FAIL: operation $op not recognized" }
+ }
+}
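+
+# A minimal usage sketch (hypothetical handles): with a transactional env in
+# $env and an open transaction in $txn, renaming database "a" to "b" through
+# the dispatcher above would be
+#	set res [do_op -btree rename {a b} $txn $env]
+# where $res is 0 on success or the raw error message on failure (callers
+# usually normalize it with extract_error).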
+
+proc do_subdb_op {omethod op names txn env {largs ""}} {
+ #
+ # The 'noop' and 'delete' actions are the same
+ # for subdbs as for regular db files.
+ #
+ switch -exact $op {
+ delete { do_delete $names }
+ rename { do_subdb_rename $names $txn $env }
+ remove { do_subdb_remove $names $txn $env }
+ noop { do_noop }
+ default { puts "FAIL: operation $op not recognized" }
+ }
+}
+
+proc do_inmem_op {omethod op names txn env {largs ""}} {
+ #
+ # The in-memory versions of do_op are different in
+ # that we don't need to pass in the filename, just
+ # the subdb names.
+ #
+ switch -exact $op {
+ delete { do_delete $names }
+ rename { do_inmem_rename $names $txn $env }
+ remove { do_inmem_remove $names $txn $env }
+ noop { do_noop }
+ open_create { do_inmem_create $omethod $names $txn $env $largs }
+ open { do_inmem_open $omethod $names $txn $env $largs }
+ open_excl { do_inmem_create_excl $omethod $names $txn $env $largs }
+ truncate { do_inmem_truncate $omethod $names $txn $env $largs }
+ default { puts "FAIL: operation $op not recognized" }
+ }
+}
+
+proc do_delete {names} {
+ #
+ # This is the odd man out among the ops -- it's not a Berkeley
+ # DB file operation, but mimics an operation done externally,
+ # as if a user deleted a file with "rm" or "erase".
+ #
+ # We assume the file is found in $testdir.
+ #
+ global testdir
+
+	if {[catch {fileremove -f $testdir/$names} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+proc do_noop { } {
+ # Do nothing. Report success.
+ return 0
+}
+
+proc do_rename {names txn env} {
+ # Pull db names out of $names
+ set oldname [lindex $names 0]
+ set newname [lindex $names 1]
+
+ if {[catch {eval $env dbrename -txn $txn \
+ $oldname $newname} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+proc do_subdb_rename {names txn env} {
+ # Pull db and subdb names out of $names
+ set filename [lindex $names 0]
+ set oldsname [lindex $names 1]
+ set newsname [lindex $names 2]
+
+ if {[catch {eval $env dbrename -txn $txn $filename \
+ $oldsname $newsname} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+proc do_inmem_rename {names txn env} {
+ # Pull db and subdb names out of $names
+ set filename ""
+ set oldsname [lindex $names 0]
+ set newsname [lindex $names 1]
+ if {[catch {eval $env dbrename -txn $txn {$filename} \
+ $oldsname $newsname} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+
+proc do_remove {names txn env} {
+ if {[catch {eval $env dbremove -txn $txn $names} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+proc do_subdb_remove {names txn env} {
+ set filename [lindex $names 0]
+ set subname [lindex $names 1]
+ if {[catch {eval $env dbremove -txn $txn $filename $subname} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+proc do_inmem_remove {names txn env} {
+ if {[catch {eval $env dbremove -txn $txn {""} $names} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+proc do_create {omethod names txn env {largs ""}} {
+ if {[catch {eval berkdb_open -create $omethod $largs -env $env \
+ -txn $txn $names} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+proc do_inmem_create {omethod names txn env {largs ""}} {
+ if {[catch {eval berkdb_open -create $omethod $largs -env $env \
+ -txn $txn "" $names} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+proc do_open {omethod names txn env {largs ""}} {
+ if {[catch {eval berkdb_open $omethod $largs -env $env \
+ -txn $txn $names} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+proc do_inmem_open {omethod names txn env {largs ""}} {
+ if {[catch {eval berkdb_open $omethod $largs -env $env \
+ -txn $txn {""} $names} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+proc do_create_excl {omethod names txn env {largs ""}} {
+ if {[catch {eval berkdb_open -create -excl $omethod $largs -env $env \
+ -txn $txn $names} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+proc do_inmem_create_excl {omethod names txn env {largs ""}} {
+ if {[catch {eval berkdb_open -create -excl $omethod $largs -env $env \
+ -txn $txn {""} $names} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+proc do_truncate {omethod names txn env {largs ""}} {
+ # First we have to get a handle. We omit the -create flag
+ # because testing of truncate is meaningful only in cases
+ # where the database already exists.
+ set db [eval {berkdb_open $omethod} $largs {-env $env -txn $txn $names}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if {[catch {$db truncate -txn $txn} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+proc do_inmem_truncate {omethod names txn env {largs ""}} {
+ set db [eval {berkdb_open $omethod} $largs {-env $env -txn $txn "" $names}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if {[catch {$db truncate -txn $txn} result]} {
+ return $result
+ } else {
+ return 0
+ }
+}
+
+proc create_tests { op1 op2 exists noexist open retval { end1 "" } } {
+ set retlist {}
+ switch $op1 {
+ rename {
+ # Use first element from exists list
+ set from [lindex $exists 0]
+ # Use first element from noexist list
+ set to [lindex $noexist 0]
+
+ # This is the first operation, which should succeed
+ set op1ret [list $op1 "$from $to" 0 $end1]
+
+ # Adjust 'exists' and 'noexist' list if and only if
+ # txn1 was not aborted.
+ if { $end1 != "abort" } {
+ set exists [lreplace $exists 0 0 $to]
+ set noexist [lreplace $noexist 0 0 $from]
+ }
+ }
+ remove {
+ set from [lindex $exists 0]
+ set op1ret [list $op1 $from 0 $end1]
+
+ if { $end1 != "abort" } {
+ set exists [lreplace $exists 0 0]
+ set noexist [lreplace $noexist 0 0 $from]
+ }
+ }
+ open_create -
+ open -
+ truncate {
+ set from [lindex $exists 0]
+ set op1ret [list $op1 $from 0 $end1]
+
+ if { $end1 != "abort" } {
+ set exists [lreplace $exists 0 0]
+ set open [list $from]
+ }
+
+ # Eliminate the 1st element in noexist: it is
+ # equivalent to the 2nd element (neither ever exists).
+ set noexist [lreplace $noexist 0 0]
+ }
+ open_excl {
+ # Use first element from noexist list
+ set from [lindex $noexist 0]
+ set op1ret [list $op1 $from 0 $end1]
+
+ if { $end1 != "abort" } {
+ set noexist [lreplace $noexist 0 0]
+ set open [list $from]
+ }
+
+ # Eliminate the 1st element in exists: it is
+ # equivalent to the 2nd element (both already exist).
+ set exists [lreplace $exists 0 0]
+ }
+ }
+
+ # Generate possible second operations given the return value.
+ set op2list [create_op2 $op2 $exists $noexist $open $retval]
+
+ foreach o $op2list {
+ lappend retlist [list $op1ret $o]
+ }
+ return $retlist
+}
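+
+# For instance (hypothetical argument lists), calling
+#	create_tests rename open {a b} {foo bar} {} 0 commit
+# yields cases whose first element is {rename {a foo} 0 commit}, each paired
+# with one successful open case produced by create_op2.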
+
+proc create_badtests { op1 op2 exists noexist open retval {end1 ""} } {
+ set retlist {}
+ switch $op1 {
+ rename {
+ # Use first element from exists list
+ set from [lindex $exists 0]
+ # Use first element from noexist list
+ set to [lindex $noexist 0]
+
+ # This is the first operation, which should fail
+ set op1list1 \
+ [list $op1 "$to $to" "no such file" $end1]
+ set op1list2 \
+ [list $op1 "$to $from" "no such file" $end1]
+ set op1list3 \
+ [list $op1 "$from $from" "file exists" $end1]
+ set op1list [list $op1list1 $op1list2 $op1list3]
+
+ # Generate second operations given the return value.
+ set op2list [create_op2 \
+ $op2 $exists $noexist $open $retval]
+ foreach op1 $op1list {
+ foreach op2 $op2list {
+ lappend retlist [list $op1 $op2]
+ }
+ }
+ return $retlist
+ }
+ remove -
+ open -
+ truncate {
+ set file [lindex $noexist 0]
+ set op1list [list $op1 $file "no such file" $end1]
+
+ set op2list [create_op2 \
+ $op2 $exists $noexist $open $retval]
+ foreach op2 $op2list {
+ lappend retlist [list $op1list $op2]
+ }
+ return $retlist
+ }
+ open_excl {
+ set file [lindex $exists 0]
+ set op1list [list $op1 $file "file exists" $end1]
+ set op2list [create_op2 \
+ $op2 $exists $noexist $open $retval]
+ foreach op2 $op2list {
+ lappend retlist [list $op1list $op2]
+ }
+ return $retlist
+ }
+ }
+}
+
+proc create_op2 { op2 exists noexist open retval } {
+ set retlist {}
+ switch $op2 {
+ rename {
+ # Successful renames arise from renaming existing
+ # to non-existing files.
+ if { $retval == 0 } {
+ set old $exists
+ set new $noexist
+ set retlist \
+ [build_retlist $op2 $old $new $retval]
+ }
+ # "File exists" errors arise from renaming existing
+ # to existing files.
+ if { $retval == "file exists" } {
+ set old $exists
+ set new $exists
+ set retlist \
+ [build_retlist $op2 $old $new $retval]
+ }
+ # "No such file" errors arise from renaming files
+ # that don't exist.
+ if { $retval == "no such file" } {
+ set old $noexist
+ set new $exists
+ set retlist1 \
+ [build_retlist $op2 $old $new $retval]
+
+ set old $noexist
+ set new $noexist
+ set retlist2 \
+ [build_retlist $op2 $old $new $retval]
+
+ set retlist [concat $retlist1 $retlist2]
+ }
+ }
+ remove {
+ # Successful removes result from removing existing
+ # files.
+ if { $retval == 0 } {
+ set file $exists
+ }
+ # "File exists" does not happen in remove.
+ if { $retval == "file exists" } {
+ return
+ }
+ # "No such file" errors arise from trying to remove
+ # files that don't exist.
+ if { $retval == "no such file" } {
+ set file $noexist
+ }
+ set retlist [build_retlist $op2 $file "" $retval]
+ }
+ open_create {
+ # Open_create should be successful with existing,
+ # open, or non-existing files.
+ if { $retval == 0 } {
+ set file [concat $exists $open $noexist]
+ }
+ # "File exists" and "no such file"
+ # do not happen in open_create.
+ if { $retval == "file exists" || \
+ $retval == "no such file" } {
+ return
+ }
+ set retlist [build_retlist $op2 $file "" $retval]
+ }
+ open {
+ # Open should be successful with existing or open files.
+ if { $retval == 0 } {
+ set file [concat $exists $open]
+ }
+ # "No such file" errors arise from trying to open
+ # non-existent files.
+ if { $retval == "no such file" } {
+ set file $noexist
+ }
+ # "File exists" errors do not happen in open.
+ if { $retval == "file exists" } {
+ return
+ }
+ set retlist [build_retlist $op2 $file "" $retval]
+ }
+ open_excl {
+ # Open_excl should be successful with non-existent files.
+ if { $retval == 0 } {
+ set file $noexist
+ }
+ # "File exists" errors arise from trying to open
+ # existing files.
+ if { $retval == "file exists" } {
+ set file [concat $exists $open]
+ }
+ # "No such file" errors do not arise in open_excl.
+ if { $retval == "no such file" } {
+ return
+ }
+ set retlist [build_retlist $op2 $file "" $retval]
+ }
+ truncate {
+ # Truncate should be successful with existing files.
+ if { $retval == 0 } {
+ set file $exists
+ }
+ # No other return values are meaningful to test since
+ # do_truncate starts with an open and we've already
+ # tested open.
+ if { $retval == "no such file" || \
+ $retval == "file exists" } {
+ return
+ }
+ set retlist [build_retlist $op2 $file "" $retval]
+ }
+ }
+ return $retlist
+}
+
+proc build_retlist { op2 file1 file2 retval } {
+ set retlist {}
+ if { $file2 == "" } {
+ foreach f1 $file1 {
+ lappend retlist [list $op2 $f1 $retval]
+ }
+ } else {
+ foreach f1 $file1 {
+ foreach f2 $file2 {
+ lappend retlist [list $op2 "$f1 $f2" $retval]
+ }
+ }
+ }
+ return $retlist
+}
+
+proc extract_error { message } {
+ if { [is_substr $message "exists"] == 1 } {
+ set message "file exists"
+ } elseif {[is_substr $message "no such file"] == 1 } {
+ set message "no such file"
+ }
+ return $message
+}
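+
+# For example, a driver message such as "db rename: ... File exists" would be
+# normalized to "file exists", matching the expected-result strings used by
+# the fop test cases.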
diff --git a/db-4.8.30/test/hsearch.tcl b/db-4.8.30/test/hsearch.tcl
new file mode 100644
index 0000000..519d05c
--- /dev/null
+++ b/db-4.8.30/test/hsearch.tcl
@@ -0,0 +1,50 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Historic Hsearch interface test.
+# Use the first 1000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Then reopen the file, re-retrieve everything.
+# Finally, delete everything.
+proc hsearch { { nentries 1000 } } {
+ source ./include.tcl
+
+ puts "HSEARCH interfaces test: $nentries"
+
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ error_check_good hcreate [berkdb hcreate $nentries] 0
+ set did [open $dict]
+ set count 0
+
+ puts "\tHSEARCH.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set ret [berkdb hsearch $str $str enter]
+ error_check_good hsearch:enter $ret 0
+
+ set d [berkdb hsearch $str 0 find]
+ error_check_good hsearch:find $d $str
+ incr count
+ }
+ close $did
+
+ puts "\tHSEARCH.b: re-get loop"
+	set did [open $dict]
+	# Reset the counter so the re-get loop actually runs.
+	set count 0
+	# Here is the loop where we retrieve each key
+	while { [gets $did str] != -1 && $count < $nentries } {
+ set d [berkdb hsearch $str 0 find]
+ error_check_good hsearch:find $d $str
+ incr count
+ }
+ close $did
+ error_check_good hdestroy [berkdb hdestroy] 0
+}
diff --git a/db-4.8.30/test/include.tcl b/db-4.8.30/test/include.tcl
new file mode 100644
index 0000000..25b652b
--- /dev/null
+++ b/db-4.8.30/test/include.tcl
@@ -0,0 +1,32 @@
+# Automatically built by dist/s_test; may require local editing.
+
+set tclsh_path @TCL_TCLSH@
+set tcllib .libs/libdb_tcl-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@@LIBTSO_MODSUFFIX@
+
+set rpc_server localhost
+set rpc_path .
+set rpc_testdir $rpc_path/TESTDIR
+
+set src_root @srcdir@/..
+set test_path @srcdir@/../test
+set je_root @srcdir@/../../je
+
+global testdir
+set testdir ./TESTDIR
+
+global dict
+global util_path
+
+global is_freebsd_test
+global is_hp_test
+global is_linux_test
+global is_qnx_test
+global is_sunos_test
+global is_windows_test
+global is_windows9x_test
+
+global valid_methods
+global checking_valid_methods
+global test_recopts
+
+set KILL "@KILL@"
diff --git a/db-4.8.30/test/join.tcl b/db-4.8.30/test/join.tcl
new file mode 100644
index 0000000..928bbf7
--- /dev/null
+++ b/db-4.8.30/test/join.tcl
@@ -0,0 +1,454 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST jointest
+# TEST Test duplicate assisted joins. Executes 1, 2, 3 and 4-way joins
+# TEST with differing index orders and selectivity.
+# TEST
+# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those
+# TEST work, everything else does as well. We'll create test databases
+# TEST called join1.db, join2.db, join3.db, and join4.db. The number on
+# TEST the database describes the duplication -- duplicates are of the
+# TEST form 0, N, 2N, 3N, ... where N is the number of the database.
+# TEST Primary.db is the primary database, and null.db is the database
+# TEST that has no matching duplicates.
+# TEST
+# TEST We should test this on all btrees, all hash, and a combination thereof
+proc jointest { {psize 8192} {with_dup_dups 0} {flags 0} } {
+ global testdir
+ global rand_init
+ source ./include.tcl
+
+ env_cleanup $testdir
+ berkdb srand $rand_init
+
+ # Use one environment for all database opens so we don't
+ # need oodles of regions.
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # With the new offpage duplicate code, we don't support
+ # duplicate duplicates in sorted dup sets. Thus, if with_dup_dups
+ # is greater than one, run only with "-dup".
+ if { $with_dup_dups > 1 } {
+ set doptarray {"-dup"}
+ } else {
+ set doptarray {"-dup -dupsort" "-dup" RANDOMMIX RANDOMMIX }
+ }
+
+ # NB: these flags are internal only, ok
+ foreach m "DB_BTREE DB_HASH DB_BOTH" {
+ # run with two different random mixes.
+ foreach dopt $doptarray {
+ set opt [list "-env" $env $dopt]
+
+ puts "Join test: ($m $dopt) psize $psize,\
+ $with_dup_dups dup\
+ dups, flags $flags."
+
+ build_all $m $psize $opt oa $with_dup_dups
+
+			# null.db is built fifth (by db_build) but is
+			# referenced by index zero; set up the option
+			# array appropriately.
+ set oa(0) $oa(5)
+
+ # Build the primary
+ puts "\tBuilding the primary database $m"
+ set oflags "-create -truncate -mode 0644 -env $env\
+ [conv $m [berkdb random_int 1 2]]"
+ set db [eval {berkdb_open} $oflags primary.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for { set i 0 } { $i < 1000 } { incr i } {
+ set key [format "%04d" $i]
+ set ret [$db put $key stub]
+ error_check_good "primary put" $ret 0
+ }
+ error_check_good "primary close" [$db close] 0
+ set did [open $dict]
+ gets $did str
+ do_join primary.db "1 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2 3 4" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "1 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 3 4" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 4 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "0 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 0 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 3 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 2 3 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str2
+ gets $did str
+ do_join primary.db "1 2" $str oa $flags\
+ $with_dup_dups "3" $str2
+
+ # You really don't want to run this section
+ # with $with_dup_dups > 2.
+ if { $with_dup_dups <= 2 } {
+ gets $did str2
+ gets $did str
+ do_join primary.db "1 2 3" $str\
+ oa $flags $with_dup_dups "3 3 1" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "4 0 2" $str\
+ oa $flags $with_dup_dups "4 3 3" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "3 2 1" $str\
+ oa $flags $with_dup_dups "0 2" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str\
+ oa $flags $with_dup_dups "1 4 4" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str\
+ oa $flags $with_dup_dups "0 0 4 4" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str2\
+ oa $flags $with_dup_dups "2 4 4" $str
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str2\
+ oa $flags $with_dup_dups "0 0 4 4" $str
+ }
+ close $did
+ }
+ }
+
+ error_check_good env_close [$env close] 0
+}
+
+proc build_all { method psize opt oaname with_dup_dups {nentries 100} } {
+ global testdir
+ db_build join1.db $nentries 50 1 [conv $method 1]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join2.db $nentries 25 2 [conv $method 2]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join3.db $nentries 16 3 [conv $method 3]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join4.db $nentries 12 4 [conv $method 4]\
+ $psize $opt $oaname $with_dup_dups
+ db_build null.db $nentries 0 5 [conv $method 5]\
+ $psize $opt $oaname $with_dup_dups
+}
+
+proc conv { m i } {
+ switch -- $m {
+ DB_HASH { return "-hash"}
+ "-hash" { return "-hash"}
+ DB_BTREE { return "-btree"}
+ "-btree" { return "-btree"}
+ DB_BOTH {
+ if { [expr $i % 2] == 0 } {
+ return "-hash";
+ } else {
+ return "-btree";
+ }
+ }
+ }
+}
+
+proc random_opts { } {
+ set j [berkdb random_int 0 1]
+ if { $j == 0 } {
+ return " -dup"
+ } else {
+ return " -dup -dupsort"
+ }
+}
+
+proc db_build { name nkeys ndups dup_interval method psize lopt oaname \
+ with_dup_dups } {
+ source ./include.tcl
+
+ # Get array of arg names (from two levels up the call stack)
+ upvar 2 $oaname oa
+
+	# Search for "RANDOMMIX" in $lopt, and if present, replace
+	# with " -dup" or " -dup -dupsort" at random.
+ set i [lsearch $lopt RANDOMMIX]
+ if { $i != -1 } {
+ set lopt [lreplace $lopt $i $i [random_opts]]
+ }
+
+ # Save off db_open arguments for this database.
+ set opt [eval concat $lopt]
+ set oa($dup_interval) $opt
+
+ # Create the database and open the dictionary
+ set oflags "-create -truncate -mode 0644 $method\
+ -pagesize $psize"
+ set db [eval {berkdb_open} $oflags $opt $name]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ puts -nonewline "\tBuilding $name: $nkeys keys "
+ puts -nonewline "with $ndups duplicates at interval of $dup_interval"
+ if { $with_dup_dups > 0 } {
+ puts ""
+ puts "\t\tand $with_dup_dups duplicate duplicates."
+ } else {
+ puts "."
+ }
+ for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } {
+ incr count} {
+ set str $str$name
+		# We need to make sure that the dups are inserted in a
+		# random, or near random, order. Do this by generating
+		# them and putting each in a list, then shuffling the list
+		# (a simple pairwise swap below stands in for a true
+		# random shuffle).
+ set duplist {}
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set data [format "%04d" [expr $i * $dup_interval]]
+ lappend duplist $data
+ }
+ # randomize the list
+ for { set i 0 } { $i < $ndups } {incr i } {
+ # set j [berkdb random_int $i [expr $ndups - 1]]
+ set j [expr ($i % 2) + $i]
+ if { $j >= $ndups } { set j $i }
+ set dupi [lindex $duplist $i]
+ set dupj [lindex $duplist $j]
+ set duplist [lreplace $duplist $i $i $dupj]
+ set duplist [lreplace $duplist $j $j $dupi]
+ }
+ foreach data $duplist {
+ if { $with_dup_dups != 0 } {
+ for { set j 0 }\
+ { $j < $with_dup_dups }\
+ {incr j} {
+ set ret [$db put $str $data]
+ error_check_good put$j $ret 0
+ }
+ } else {
+ set ret [$db put $str $data]
+ error_check_good put $ret 0
+ }
+ }
+
+ if { $ndups == 0 } {
+ set ret [$db put $str NODUP]
+ error_check_good put $ret 0
+ }
+ }
+ close $did
+ error_check_good close:$name [$db close] 0
+}
+
+proc do_join { primary dbs key oanm flags with_dup_dups {dbs2 ""} {key2 ""} } {
+ global testdir
+ source ./include.tcl
+
+ upvar $oanm oa
+
+ puts -nonewline "\tJoining: $dbs on $key"
+ if { $dbs2 == "" } {
+ puts ""
+ } else {
+ puts " with $dbs2 on $key2"
+ }
+
+ # Open all the databases
+ set p [berkdb_open -unknown $testdir/$primary]
+ error_check_good "primary open" [is_valid_db $p] TRUE
+
+ set dblist ""
+ set curslist ""
+
+ set ndx [llength $dbs]
+
+ foreach i [concat $dbs $dbs2] {
+ set opt $oa($i)
+ set db [eval {berkdb_open -unknown} $opt [n_to_name $i]]
+ error_check_good "[n_to_name $i] open" [is_valid_db $db] TRUE
+ set curs [$db cursor]
+ error_check_good "$db cursor" \
+ [is_substr $curs "$db.c"] 1
+ lappend dblist $db
+ lappend curslist $curs
+
+ if { $ndx > 0 } {
+ set realkey [concat $key[n_to_name $i]]
+ } else {
+ set realkey [concat $key2[n_to_name $i]]
+ }
+
+ set pair [$curs get -set $realkey]
+ error_check_good cursor_set:$realkey:$pair \
+ [llength [lindex $pair 0]] 2
+
+ incr ndx -1
+ }
+
+ set join_curs [eval {$p join} $curslist]
+ error_check_good join_cursor \
+ [is_substr $join_curs "$p.c"] 1
+
+	# Calculate how many dups we expect.
+	# We go through the list of indices. If we find a 0, then we
+	# expect 0 dups. Otherwise we look at pairs of numbers: if one
+	# divides the other, take the number of times the larger goes
+	# into 50; if not, take the number of times their least common
+	# multiple goes into 50.
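+	# For example, joining join2.db and join3.db: neither 2 nor 3 divides
+	# the other, lcm(2,3) = 6, so we expect 50 / 6 = 8 matching dups
+	# (integer division).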
+ set expected 50
+ set last 1
+ foreach n [concat $dbs $dbs2] {
+ if { $n == 0 } {
+ set expected 0
+ break
+ }
+ if { $last == $n } {
+ continue
+ }
+
+ if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } {
+ if { $n > $last } {
+ set last $n
+ set expected [expr 50 / $last]
+ }
+ } else {
+ set last [expr $n * $last / [gcd $n $last]]
+ set expected [expr 50 / $last]
+ }
+ }
+
+ # If $with_dup_dups is greater than zero, each datum has
+ # been inserted $with_dup_dups times. So we expect the number
+ # of dups to go up by a factor of ($with_dup_dups)^(number of databases)
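+	# (For example, with_dup_dups of 2 in a 2-way join scales the
+	# expected count by 2 * 2 = 4.)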
+
+ if { $with_dup_dups > 0 } {
+ foreach n [concat $dbs $dbs2] {
+ set expected [expr $expected * $with_dup_dups]
+ }
+ }
+
+ set ndups 0
+ if { $flags == " -join_item"} {
+ set l 1
+ } else {
+ set flags ""
+ set l 2
+ }
+ for { set pair [eval {$join_curs get} $flags] } { \
+ [llength [lindex $pair 0]] == $l } {
+ set pair [eval {$join_curs get} $flags] } {
+ set k [lindex [lindex $pair 0] 0]
+ foreach i $dbs {
+ error_check_bad valid_dup:$i:$dbs $i 0
+ set kval [string trimleft $k 0]
+ if { [string length $kval] == 0 } {
+ set kval 0
+ }
+ error_check_good valid_dup:$i:$dbs [expr $kval % $i] 0
+ }
+ incr ndups
+ }
+ error_check_good number_of_dups:$dbs $ndups $expected
+
+ error_check_good close_primary [$p close] 0
+ foreach i $curslist {
+ error_check_good close_cursor:$i [$i close] 0
+ }
+ foreach i $dblist {
+ error_check_good close_index:$i [$i close] 0
+ }
+}
+
+proc n_to_name { n } {
+	global testdir
+ if { $n == 0 } {
+ return null.db;
+ } else {
+ return join$n.db;
+ }
+}
+
+proc gcd { a b } {
+ set g 1
+
+ for { set i 2 } { $i <= $a } { incr i } {
+ if { [expr $a % $i] == 0 && [expr $b % $i] == 0 } {
+ set g $i
+ }
+ }
+ return $g
+}
diff --git a/db-4.8.30/test/lock001.tcl b/db-4.8.30/test/lock001.tcl
new file mode 100644
index 0000000..fe4ce50
--- /dev/null
+++ b/db-4.8.30/test/lock001.tcl
@@ -0,0 +1,121 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+
+# TEST lock001
+# TEST Make sure that the basic lock tests work. Do some simple gets
+# TEST and puts for a single locker.
+proc lock001 { {iterations 1000} } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+
+ # Set defaults
+ # Adjusted to make exact match of isqrt
+ #set conflicts { 3 0 0 0 0 0 1 0 1 1}
+ #set conflicts { 3 0 0 0 0 1 0 1 1}
+
+ set conflicts { 0 0 0 0 0 1 0 1 1}
+ set nmodes [isqrt [llength $conflicts]]
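+	# The flattened list above encodes a 3x3 conflict matrix (hence the
+	# isqrt). With the usual ng/read/write mode ordering it expresses the
+	# standard shared/exclusive rules: ng conflicts with nothing, read
+	# conflicts only with write, and write conflicts with both.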
+
+ # Cleanup
+ env_cleanup $testdir
+
+ # Open the region we'll use for testing.
+ set eflags "-create -lock -home $testdir -mode 0644 \
+ -lock_conflict {$nmodes {$conflicts}}"
+ set env [eval {berkdb_env} $eflags]
+ error_check_good env [is_valid_env $env] TRUE
+ error_check_good lock_id_set \
+ [$env lock_id_set $lock_curid $lock_maxid] 0
+
+ puts "Lock001: test basic lock operations"
+ set locker [$env lock_id]
+ # Get and release each type of lock
+ puts "\tLock001.a: get and release each type of lock"
+ foreach m {ng write read} {
+ set obj obj$m
+ set lockp [$env lock_get $m $locker $obj]
+ error_check_good lock_get:a [is_blocked $lockp] 0
+ error_check_good lock_get:a [is_substr $lockp $env] 1
+ set ret [ $lockp put ]
+ error_check_good lock_put $ret 0
+ }
+
+ # Get a bunch of locks for the same locker; these should work
+ set obj OBJECT
+ puts "\tLock001.b: Get a bunch of locks for the same locker"
+ foreach m {ng write read} {
+ set lockp [$env lock_get $m $locker $obj ]
+ lappend locklist $lockp
+ error_check_good lock_get:b [is_blocked $lockp] 0
+ error_check_good lock_get:b [is_substr $lockp $env] 1
+ }
+ release_list $locklist
+
+ set locklist {}
+ # Check that reference counted locks work
+ puts "\tLock001.c: reference counted locks."
+ for {set i 0} { $i < 10 } {incr i} {
+ set lockp [$env lock_get -nowait write $locker $obj]
+ error_check_good lock_get:c [is_blocked $lockp] 0
+ error_check_good lock_get:c [is_substr $lockp $env] 1
+ lappend locklist $lockp
+ }
+ release_list $locklist
+
+	# Finally, take locks that a second locker will fail to get below.
+ set locklist {}
+ foreach i {ng write read} {
+ set lockp [$env lock_get $i $locker $obj]
+ lappend locklist $lockp
+ error_check_good lock_get:d [is_blocked $lockp] 0
+ error_check_good lock_get:d [is_substr $lockp $env] 1
+ }
+
+ # Change the locker
+ set locker [$env lock_id]
+ set blocklist {}
+ # Skip NO_LOCK lock.
+ puts "\tLock001.d: Change the locker, acquire read and write."
+ foreach i {write read} {
+ catch {$env lock_get -nowait $i $locker $obj} ret
+ error_check_good lock_get:e [is_substr $ret "not granted"] 1
+ #error_check_good lock_get:e [is_substr $lockp $env] 1
+ #error_check_good lock_get:e [is_blocked $lockp] 0
+ }
+ # Now release original locks
+ release_list $locklist
+
+ # Now re-acquire blocking locks
+ set locklist {}
+ puts "\tLock001.e: Re-acquire blocking locks."
+ foreach i {write read} {
+ set lockp [$env lock_get -nowait $i $locker $obj ]
+ error_check_good lock_get:f [is_substr $lockp $env] 1
+ error_check_good lock_get:f [is_blocked $lockp] 0
+ lappend locklist $lockp
+ }
+
+ # Now release new locks
+ release_list $locklist
+ error_check_good free_id [$env lock_id_free $locker] 0
+
+ error_check_good envclose [$env close] 0
+
+}
+
+# Blocked locks appear as lockmgrN.lockM\nBLOCKED
+proc is_blocked { l } {
+ if { [string compare $l BLOCKED ] == 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
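
The -lock_conflict argument used in lock001 takes the number of modes and a flat list of nmodes * nmodes entries; isqrt recovers the dimension from the list length. A small hypothetical helper (not part of the suite, and assuming the list is laid out one row per mode) makes the square structure easier to see:

proc print_conflict_matrix { conflicts modes } {
	# Print one row of the conflict list per lock mode; a 1 marks a
	# conflicting pair of modes under this matrix.
	set n [llength $modes]
	for { set r 0 } { $r < $n } { incr r } {
		set row [lrange $conflicts [expr {$r * $n}] [expr {$r * $n + $n - 1}]]
		puts [format "%-6s %s" [lindex $modes $r] $row]
	}
}
# Example, with the matrix and mode order used in lock001:
#   print_conflict_matrix {0 0 0 0 0 1 0 1 1} {ng write read}
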
diff --git a/db-4.8.30/test/lock002.tcl b/db-4.8.30/test/lock002.tcl
new file mode 100644
index 0000000..27a7145
--- /dev/null
+++ b/db-4.8.30/test/lock002.tcl
@@ -0,0 +1,154 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST lock002
+# TEST Exercise basic multi-process aspects of lock.
+proc lock002 { {conflicts {0 0 0 0 0 1 0 1 1} } } {
+ source ./include.tcl
+
+ puts "Lock002: Basic multi-process lock tests."
+
+ env_cleanup $testdir
+
+ set nmodes [isqrt [llength $conflicts]]
+
+ # Open the lock
+ mlock_open $nmodes $conflicts
+ mlock_wait
+}
+
+# Make sure that we can create a region; destroy it, attach to it,
+# detach from it, etc.
+proc mlock_open { nmodes conflicts } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ puts "\tLock002.a multi-process open/close test"
+
+ # Open/Create region here. Then close it and try to open from
+ # other test process.
+ set env_cmd [concat "berkdb_env -create -mode 0644 -lock \
+ -lock_conflict" [list [list $nmodes $conflicts]] "-home $testdir"]
+ set local_env [eval $env_cmd]
+ $local_env lock_id_set $lock_curid $lock_maxid
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ set ret [$local_env close]
+ error_check_good env_close $ret 0
+
+ # Open from other test process
+ set env_cmd "berkdb_env -mode 0644 -home $testdir"
+
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Now make sure that we can reopen the region.
+ set local_env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+ set ret [$local_env close]
+ error_check_good env_close $ret 0
+
+ # Try closing the remote region
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:lock_close $ret 0
+
+ # Try opening for create. Will succeed because region exists.
+ set env_cmd [concat "berkdb_env -create -mode 0644 -lock \
+ -lock_conflict" [list [list $nmodes $conflicts]] "-home $testdir"]
+ set local_env [eval $env_cmd]
+ error_check_good remote:env_open [is_valid_env $local_env] TRUE
+
+ # close locally
+ reset_env $local_env
+
+ # Close and exit remote
+ set ret [send_cmd $f1 "reset_env $remote_env"]
+
+ catch { close $f1 } result
+}
+
+proc mlock_wait { } {
+ source ./include.tcl
+
+ puts "\tLock002.b multi-process get/put wait test"
+
+ # Open region locally
+ set env_cmd "berkdb_env -home $testdir"
+ set local_env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ # Open region remotely
+ set f1 [open |$tclsh_path r+]
+
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Get a write lock locally; try for the read lock
+ # remotely. We hold the locks for several seconds
+ # so that we can use timestamps to figure out if the
+ # other process waited.
+ set locker1 [$local_env lock_id]
+ set local_lock [$local_env lock_get write $locker1 object1]
+ error_check_good lock_get [is_valid_lock $local_lock $local_env] TRUE
+
+ # Now request a lock that we expect to hang; generate
+ # timestamps so we can tell if it actually hangs.
+ set locker2 [send_cmd $f1 "$remote_env lock_id"]
+ set remote_lock [send_timed_cmd $f1 1 \
+ "set lock \[$remote_env lock_get write $locker2 object1\]"]
+
+ # Now sleep before releasing lock
+ tclsleep 5
+ set result [$local_lock put]
+ error_check_good lock_put $result 0
+
+ # Now get the result from the other script
+ set result [rcv_result $f1]
+ error_check_good lock_get:remote_time [expr $result > 4] 1
+
+ # Now get the remote lock
+ set remote_lock [send_cmd $f1 "puts \$lock"]
+ error_check_good remote:lock_get \
+ [is_valid_lock $remote_lock $remote_env] TRUE
+
+ # Now make the other guy wait 5 seconds and then release his
+ # lock while we try to get a write lock on it.
+ set start [timestamp -r]
+
+ set ret [send_cmd $f1 "tclsleep 5"]
+
+ set ret [send_cmd $f1 "$remote_lock put"]
+
+ set local_lock [$local_env lock_get write $locker1 object1]
+ error_check_good lock_get:time \
+ [expr [expr [timestamp -r] - $start] > 2] 1
+ error_check_good lock_get:local \
+ [is_valid_lock $local_lock $local_env] TRUE
+
+ # Now check remote's result
+ set result [rcv_result $f1]
+ error_check_good lock_put:remote $result 0
+
+ # Clean up remote
+ set result [send_cmd $f1 "$remote_env lock_id_free $locker2" ]
+ error_check_good remote_free_id $result 0
+ set ret [send_cmd $f1 "reset_env $remote_env"]
+
+ close $f1
+
+ # Now close up locally
+ set ret [$local_lock put]
+ error_check_good lock_put $ret 0
+ error_check_good lock_id_free [$local_env lock_id_free $locker1] 0
+
+ reset_env $local_env
+}
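
lock002 drives its second process through a command pipe using send_cmd and send_timed_cmd, helpers defined elsewhere in the suite. A minimal stand-alone sketch of the underlying mechanism (assuming a tclsh binary on the PATH) looks like this:

# Open a second Tcl interpreter on a read/write pipe, send it a command,
# and read back one line of output. The explicit flush in the child is
# needed because its stdout is a pipe and therefore block-buffered.
set f [open "|tclsh" r+]
fconfigure $f -buffering line
puts $f {puts [expr {6 * 7}]; flush stdout}
set answer [gets $f]
puts "child answered: $answer"
close $f
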
diff --git a/db-4.8.30/test/lock003.tcl b/db-4.8.30/test/lock003.tcl
new file mode 100644
index 0000000..a2d1a83
--- /dev/null
+++ b/db-4.8.30/test/lock003.tcl
@@ -0,0 +1,100 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST lock003
+# TEST Exercise multi-process aspects of lock. Generate a bunch of parallel
+# TEST testers that try to randomly obtain locks; make sure that the locks
+# TEST correctly protect corresponding objects.
+proc lock003 { {iter 500} {max 1000} {procs 5} } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set ldegree 5
+ set objs 75
+ set reads 65
+ set wait 1
+ set conflicts { 0 0 0 0 0 1 0 1 1}
+ set seeds {}
+
+ puts "Lock003: Multi-process random lock test"
+
+ # Clean up after previous runs
+ env_cleanup $testdir
+
+ # Open/create the lock region
+ puts "\tLock003.a: Create environment"
+ set e [berkdb_env -create -lock -home $testdir]
+ error_check_good env_open [is_substr $e env] 1
+ $e lock_id_set $lock_curid $lock_maxid
+
+ error_check_good env_close [$e close] 0
+
+ # Now spawn off processes
+ set pidlist {}
+
+ for { set i 0 } {$i < $procs} {incr i} {
+ if { [llength $seeds] == $procs } {
+ set s [lindex $seeds $i]
+ }
+# puts "$tclsh_path\
+# $test_path/wrap.tcl \
+# lockscript.tcl $testdir/$i.lockout\
+# $testdir $iter $objs $wait $ldegree $reads &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ lockscript.tcl $testdir/lock003.$i.out \
+ $testdir $iter $objs $wait $ldegree $reads &]
+ lappend pidlist $p
+ }
+
+ puts "\tLock003.b: $procs independent processes now running"
+ watch_procs $pidlist 30 10800
+
+ # Check for test failure
+ set errstrings [eval findfail [glob $testdir/lock003.*.out]]
+ foreach str $errstrings {
+ puts "FAIL: error message in .out file: $str"
+ }
+
+ # Remove log files
+ for { set i 0 } {$i < $procs} {incr i} {
+ fileremove -f $testdir/lock003.$i.out
+ }
+}
+
+# Create and destroy flag files to show we have an object locked, and
+# verify that the correct files exist or don't exist given that we've
+# just read or write locked a file.
+proc lock003_create { rw obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ set f [open $pref.$rw.[pid].$obj w]
+ close $f
+}
+
+proc lock003_destroy { obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ set f [glob -nocomplain $pref.*.[pid].$obj]
+ error_check_good l3_destroy [llength $f] 1
+ fileremove $f
+}
+
+proc lock003_vrfy { rw obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ if { [string compare $rw "write"] == 0 } {
+ set fs [glob -nocomplain $pref.*.*.$obj]
+ error_check_good "number of other locks on $obj" [llength $fs] 0
+ } else {
+ set fs [glob -nocomplain $pref.write.*.$obj]
+ error_check_good "number of write locks on $obj" [llength $fs] 0
+ }
+}
+
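
The three helpers above implement a simple flag-file protocol: a tester only creates a flag once it holds the corresponding lock, and verification checks that no incompatible flags exist. A sketch of the sequence a write-lock holder runs (this mirrors how lockscript.tcl calls the helpers; the object name is arbitrary, and the suite's include.tcl is assumed to have been sourced):

set obj 7
lock003_vrfy write $obj      ;# no other flag file may exist for this object
lock003_create write $obj    ;# advertise the write lock we now hold
# ... do work while holding the lock ...
lock003_destroy $obj         ;# withdraw our flag before releasing the lock
lock003_vrfy write $obj      ;# nothing else should have locked it meanwhile
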
diff --git a/db-4.8.30/test/lock004.tcl b/db-4.8.30/test/lock004.tcl
new file mode 100644
index 0000000..793c141
--- /dev/null
+++ b/db-4.8.30/test/lock004.tcl
@@ -0,0 +1,28 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST lock004
+# TEST Test locker ids wrapping around.
+
+proc lock004 {} {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+
+ set lock_curid [expr $lock_maxid - 1]
+ puts "Lock004: Locker id wraparound test"
+ puts "\tLock004.a: repeat lock001-lock003 with wraparound lockids"
+
+ lock001
+ lock002
+ lock003
+
+ set lock_curid $save_curid
+ set lock_maxid $save_maxid
+}
diff --git a/db-4.8.30/test/lock005.tcl b/db-4.8.30/test/lock005.tcl
new file mode 100644
index 0000000..a1574cf
--- /dev/null
+++ b/db-4.8.30/test/lock005.tcl
@@ -0,0 +1,176 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST lock005
+# TEST Check that page locks are being released properly.
+
+proc lock005 { } {
+ source ./include.tcl
+
+ puts "Lock005: Page lock release test"
+
+ # Clean up after previous runs
+ env_cleanup $testdir
+
+ # Open/create the lock region
+ set e [berkdb_env -create -lock -home $testdir -txn -log]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create the database
+ set db [berkdb open -create -auto_commit -env $e -len 10 -queue q.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+	# Check that records are locked by trying to
+	# fetch a record using the wrong transaction.
+ puts "\tLock005.a: Verify that we are locking"
+
+ # Start the first transaction
+ set txn1 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
+ set ret [catch {$db put -txn $txn1 -append record1} recno1]
+ error_check_good dbput_txn1 $ret 0
+
+ # Start second txn while the first is still running ...
+ set txn2 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
+
+ # ... and try to get a record from the first txn (should fail)
+ set ret [catch {$db get -txn $txn2 $recno1} res]
+ error_check_good dbget_wrong_record \
+ [is_substr $res "deadlock"] 1
+
+ # End transactions
+ error_check_good txn1commit [$txn1 commit] 0
+ how_many_locks 1 $e
+ error_check_good txn2commit [$txn2 commit] 0
+ # The number of locks stays the same here because the first
+ # lock is released and the second lock was never granted.
+ how_many_locks 1 $e
+
+ # Test lock behavior for both abort and commit
+ puts "\tLock005.b: Verify locks after abort or commit"
+ foreach endorder {forward reverse} {
+ end_order_test $db $e commit abort $endorder
+ end_order_test $db $e abort commit $endorder
+ end_order_test $db $e commit commit $endorder
+ end_order_test $db $e abort abort $endorder
+ }
+
+ # Clean up
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+}
+
+proc end_order_test { db e txn1end txn2end endorder } {
+ # Start one transaction
+ set txn1 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
+ set ret [catch {$db put -txn $txn1 -append record1} recno1]
+ error_check_good dbput_txn1 $ret 0
+
+ # Check number of locks
+ how_many_locks 2 $e
+
+ # Start a second transaction while first is still running
+ set txn2 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
+ set ret [catch {$db put -txn $txn2 -append record2} recno2]
+ error_check_good dbput_txn2 $ret 0
+ how_many_locks 3 $e
+
+ # Now commit or abort one txn and make sure the other is okay
+ if {$endorder == "forward"} {
+ # End transaction 1 first
+ puts "\tLock005.b.1: $txn1end txn1 then $txn2end txn2"
+ error_check_good txn_$txn1end [$txn1 $txn1end] 0
+ how_many_locks 2 $e
+
+ # txn1 is now ended, but txn2 is still running
+ set ret1 [catch {$db get -txn $txn2 $recno1} res1]
+ set ret2 [catch {$db get -txn $txn2 $recno2} res2]
+ if { $txn1end == "commit" } {
+ error_check_good txn2_sees_txn1 $ret1 0
+ error_check_good txn2_sees_txn2 $ret2 0
+ } else {
+ # transaction 1 was aborted
+ error_check_good txn2_cantsee_txn1 [llength $res1] 0
+ }
+
+ # End transaction 2 second
+ error_check_good txn_$txn2end [$txn2 $txn2end] 0
+ how_many_locks 1 $e
+
+ # txn1 and txn2 should both now be invalid
+ # The get no longer needs to be transactional
+ set ret3 [catch {$db get $recno1} res3]
+ set ret4 [catch {$db get $recno2} res4]
+
+ if { $txn2end == "commit" } {
+ error_check_good txn2_sees_txn1 $ret3 0
+ error_check_good txn2_sees_txn2 $ret4 0
+ error_check_good txn2_has_record2 \
+ [is_substr $res4 "record2"] 1
+ } else {
+ # transaction 2 was aborted
+ error_check_good txn2_cantsee_txn1 $ret3 0
+ error_check_good txn2_aborted [llength $res4] 0
+ }
+
+ } elseif { $endorder == "reverse" } {
+ # End transaction 2 first
+ puts "\tLock005.b.2: $txn2end txn2 then $txn1end txn1"
+ error_check_good txn_$txn2end [$txn2 $txn2end] 0
+ how_many_locks 2 $e
+
+ # txn2 is ended, but txn1 is still running
+ set ret1 [catch {$db get -txn $txn1 $recno1} res1]
+ set ret2 [catch {$db get -txn $txn1 $recno2} res2]
+ if { $txn2end == "commit" } {
+ error_check_good txn1_sees_txn1 $ret1 0
+ error_check_good txn1_sees_txn2 $ret2 0
+ } else {
+ # transaction 2 was aborted
+ error_check_good txn1_cantsee_txn2 [llength $res2] 0
+ }
+
+ # End transaction 1 second
+ error_check_good txn_$txn1end [$txn1 $txn1end] 0
+ how_many_locks 1 $e
+
+ # txn1 and txn2 should both now be invalid
+ # The get no longer needs to be transactional
+ set ret3 [catch {$db get $recno1} res3]
+ set ret4 [catch {$db get $recno2} res4]
+
+ if { $txn1end == "commit" } {
+ error_check_good txn1_sees_txn1 $ret3 0
+ error_check_good txn1_sees_txn2 $ret4 0
+ error_check_good txn1_has_record1 \
+ [is_substr $res3 "record1"] 1
+ } else {
+ # transaction 1 was aborted
+ error_check_good txn1_cantsee_txn2 $ret4 0
+ error_check_good txn1_aborted [llength $res3] 0
+ }
+ }
+}
+
+proc how_many_locks { expected env } {
+ set stat [$env lock_stat]
+ set str "Current number of locks"
+ set checked 0
+ foreach statpair $stat {
+ if { $checked == 1 } {
+ break
+ }
+ if { [is_substr [lindex $statpair 0] $str] != 0} {
+ set checked 1
+ set nlocks [lindex $statpair 1]
+ error_check_good expected_nlocks $nlocks $expected
+ }
+ }
+ error_check_good checked $checked 1
+}
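
how_many_locks above scans the {description value} pairs returned by lock_stat for one particular counter. A generic form of the same scan, sketched here, works for any BDB stat output with that shape (the suite's own stat_field helper, used later in this diff, plays a similar role):

proc stat_value { statlist substring } {
	# Return the value of the first stat pair whose description
	# contains the given substring.
	foreach pair $statlist {
		if { [string first $substring [lindex $pair 0]] >= 0 } {
			return [lindex $pair 1]
		}
	}
	return -code error "no stat field matching \"$substring\""
}
# Example: set nlocks [stat_value [$env lock_stat] "Current number of locks"]
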
diff --git a/db-4.8.30/test/lock006.tcl b/db-4.8.30/test/lock006.tcl
new file mode 100644
index 0000000..d68631e
--- /dev/null
+++ b/db-4.8.30/test/lock006.tcl
@@ -0,0 +1,186 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST lock006
+# TEST Test lock_vec interface. We do all the same things that
+# TEST lock001 does, using lock_vec instead of lock_get and lock_put,
+# TEST plus a few more things like lock-coupling.
+# TEST 1. Get and release one at a time.
+# TEST 2. Release with put_obj (all locks for a given locker/obj).
+# TEST 3. Release with put_all (all locks for a given locker).
+# TEST Regularly check lock_stat to verify all locks have been
+# TEST released.
+proc lock006 { } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+
+ # Cleanup
+ env_cleanup $testdir
+
+ # Open the region we'll use for testing.
+ set eflags "-create -lock -home $testdir"
+ set env [eval {berkdb_env} $eflags]
+ error_check_good env [is_valid_env $env] TRUE
+ error_check_good lock_id_set \
+ [$env lock_id_set $lock_curid $lock_maxid] 0
+
+ puts "Lock006: test basic lock operations using lock_vec interface"
+ set locker [$env lock_id]
+ set modes {ng write read iwrite iread iwr}
+
+ # Get and release each type of lock.
+ puts "\tLock006.a: get and release one at a time"
+ foreach m $modes {
+ set obj obj$m
+ set lockp [$env lock_vec $locker "get $obj $m"]
+ error_check_good lock_vec_get:a [is_blocked $lockp] 0
+ error_check_good lock_vec_get:a [is_valid_lock $lockp $env] TRUE
+ error_check_good lock_vec_put:a \
+ [$env lock_vec $locker "put $lockp"] 0
+ }
+ how_many_locks 0 $env
+
+ # Get a bunch of locks for the same locker; these should work
+ set obj OBJECT
+ puts "\tLock006.b: Get many locks for 1 locker,\
+ release with put_all."
+ foreach m $modes {
+ set lockp [$env lock_vec $locker "get $obj $m"]
+ error_check_good lock_vec_get:b [is_blocked $lockp] 0
+ error_check_good lock_vec_get:b [is_valid_lock $lockp $env] TRUE
+ }
+ how_many_locks 6 $env
+ error_check_good release [$env lock_vec $locker put_all] 0
+ how_many_locks 0 $env
+
+ puts "\tLock006.c: Get many locks for 1 locker,\
+ release with put_obj."
+ foreach m $modes {
+ set lockp [$env lock_vec $locker "get $obj $m"]
+ error_check_good lock_vec_get:b [is_blocked $lockp] 0
+ error_check_good lock_vec_get:b [is_valid_lock $lockp $env] TRUE
+ }
+ error_check_good release [$env lock_vec $locker "put_obj $obj"] 0
+# how_many_locks 0 $env
+ how_many_locks 6 $env
+
+ # Get many locks for the same locker on more than one object.
+ # Release with put_all.
+ set obj2 OBJECT2
+ puts "\tLock006.d: Get many locks on 2 objects for 1 locker,\
+ release with put_all."
+ foreach m $modes {
+ set lockp [$env lock_vec $locker "get $obj $m"]
+ error_check_good lock_vec_get:b [is_blocked $lockp] 0
+ error_check_good lock_vec_get:b [is_valid_lock $lockp $env] TRUE
+ }
+ foreach m $modes {
+ set lockp [$env lock_vec $locker "get $obj2 $m"]
+ error_check_good lock_vec_get:b [is_blocked $lockp] 0
+ error_check_good lock_vec_get:b [is_valid_lock $lockp $env] TRUE
+ }
+ error_check_good release [$env lock_vec $locker put_all] 0
+# how_many_locks 0 $env
+ how_many_locks 6 $env
+
+ # Check that reference counted locks work.
+ puts "\tLock006.e: reference counted locks."
+ for {set i 0} { $i < 10 } {incr i} {
+ set lockp [$env lock_vec -nowait $locker "get $obj write"]
+ error_check_good lock_vec_get:c [is_blocked $lockp] 0
+ error_check_good lock_vec_get:c [is_valid_lock $lockp $env] TRUE
+ }
+ error_check_good put_all [$env lock_vec $locker put_all] 0
+# how_many_locks 0 $env
+ how_many_locks 6 $env
+
+ # Lock-coupling. Get a lock on object 1. Get a lock on object 2,
+ # release object 1, and so on.
+ puts "\tLock006.f: Lock-coupling."
+ set locker2 [$env lock_id]
+
+ foreach m { read write iwrite iread iwr } {
+ set lockp [$env lock_vec $locker "get OBJ0 $m"]
+ set iter 0
+ set nobjects 10
+ while { $iter < 3 } {
+ for { set i 1 } { $i <= $nobjects } { incr i } {
+ set lockv [$env lock_vec $locker \
+ "get OBJ$i $m" "put $lockp"]
+
+ # Make sure another locker can get an exclusive
+ # lock on the object just released.
+ set lock2p [$env lock_vec -nowait $locker2 \
+ "get OBJ[expr $i - 1] write" ]
+ error_check_good release_lock2 [$env lock_vec \
+ $locker2 "put $lock2p"] 0
+
+ # Make sure another locker can't get an exclusive
+ # lock on the object just locked.
+ catch {$env lock_vec -nowait $locker2 \
+ "get OBJ$i write"} ret
+ error_check_good not_granted \
+ [is_substr $ret "not granted"] 1
+
+ set lockp [lindex $lockv 0]
+ if { $i == $nobjects } {
+ incr iter
+ }
+ }
+ }
+ error_check_good lock_put [$env lock_vec $locker "put $lockp"] 0
+# how_many_locks 0 $env
+ how_many_locks 6 $env
+ }
+
+ # Finally try some failing locks. Set up a write lock on object.
+ foreach m { write } {
+ set lockp [$env lock_vec $locker "get $obj $m"]
+ error_check_good lock_vec_get:d [is_blocked $lockp] 0
+ error_check_good lock_vec_get:d [is_valid_lock $lockp $env] TRUE
+ }
+
+ # Change the locker
+ set newlocker [$env lock_id]
+ # Skip NO_LOCK.
+ puts "\tLock006.g: Change the locker, try to acquire read and write."
+ foreach m { read write iwrite iread iwr } {
+ catch {$env lock_vec -nowait $newlocker "get $obj $m"} ret
+ error_check_good lock_vec_get:d [is_substr $ret "not granted"] 1
+ }
+
+ # Now release original locks
+ error_check_good put_all [$env lock_vec $locker {put_all}] 0
+ error_check_good free_id [$env lock_id_free $locker] 0
+
+ # Now re-acquire blocking locks
+ puts "\tLock006.h: Re-acquire blocking locks."
+ foreach m { read write iwrite iread iwr } {
+ set lockp [$env lock_vec -nowait $newlocker "get $obj $m"]
+ error_check_good lock_get:e [is_valid_lock $lockp $env] TRUE
+ error_check_good lock_get:e [is_blocked $lockp] 0
+ }
+
+ # Now release new locks
+ error_check_good put_all [$env lock_vec $newlocker {put_all}] 0
+ error_check_good free_id [$env lock_id_free $newlocker] 0
+
+ error_check_good envclose [$env close] 0
+
+}
+
+# Blocked locks appear as lockmgrN.lockM\nBLOCKED
+proc is_blocked { l } {
+ if { [string compare $l BLOCKED ] == 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
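
The core of the lock-coupling pass in Lock006.f is that a single lock_vec call can acquire the lock on the next object and release the lock on the previous one. A distilled sketch, assuming $env, $locker and the OBJ* names are set up as in lock006:

# Acquire OBJ0, then walk OBJ1..OBJ3, coupling each get with the put of
# the previously held lock in one vector call.
set lockp [$env lock_vec $locker "get OBJ0 read"]
for { set i 1 } { $i <= 3 } { incr i } {
	set lockv [$env lock_vec $locker "get OBJ$i read" "put $lockp"]
	set lockp [lindex $lockv 0]
}
$env lock_vec $locker "put $lockp"
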
diff --git a/db-4.8.30/test/lockscript.tcl b/db-4.8.30/test/lockscript.tcl
new file mode 100644
index 0000000..114e75b
--- /dev/null
+++ b/db-4.8.30/test/lockscript.tcl
@@ -0,0 +1,116 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Random lock tester.
+# Usage: lockscript dir numiters numobjs sleepint degree readratio
+# dir: lock directory.
+# numiters: Total number of iterations.
+# numobjs: Number of objects on which to lock.
+# sleepint: Maximum sleep interval.
+# degree: Maximum number of locks to acquire at once
+# readratio: Percent of locks that should be reads.
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "lockscript dir numiters numobjs sleepint degree readratio"
+
+# Verify usage
+if { $argc != 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set numiters [ lindex $argv 1 ]
+set numobjs [ lindex $argv 2 ]
+set sleepint [ lindex $argv 3 ]
+set degree [ lindex $argv 4 ]
+set readratio [ lindex $argv 5 ]
+
+# Initialize random number generator
+global rand_init
+berkdb srand $rand_init
+
+
+catch { berkdb_env -create -lock -home $dir } e
+error_check_good env_open [is_substr $e env] 1
+catch { $e lock_id } locker
+error_check_good locker [is_valid_locker $locker] TRUE
+
+puts -nonewline "Beginning execution for $locker: $numiters $numobjs "
+puts "$sleepint $degree $readratio"
+flush stdout
+
+for { set iter 0 } { $iter < $numiters } { incr iter } {
+ set nlocks [berkdb random_int 1 $degree]
+ # We will always lock objects in ascending order to avoid
+ # deadlocks.
+ set lastobj 1
+ set locklist {}
+ set objlist {}
+ for { set lnum 0 } { $lnum < $nlocks } { incr lnum } {
+ # Pick lock parameters
+ set obj [berkdb random_int $lastobj $numobjs]
+ set lastobj [expr $obj + 1]
+ set x [berkdb random_int 1 100 ]
+ if { $x <= $readratio } {
+ set rw read
+ } else {
+ set rw write
+ }
+ puts "[timestamp -c] $locker $lnum: $rw $obj"
+
+ # Do get; add to list
+ catch {$e lock_get $rw $locker $obj} lockp
+ error_check_good lock_get [is_valid_lock $lockp $e] TRUE
+
+ # Create a file to flag that we've a lock of the given
+ # type, after making sure only other read locks exist
+ # (if we're read locking) or no other locks exist (if
+ # we're writing).
+ lock003_vrfy $rw $obj
+ lock003_create $rw $obj
+ lappend objlist [list $obj $rw]
+
+ lappend locklist $lockp
+ if {$lastobj > $numobjs} {
+ break
+ }
+ }
+ # Pick sleep interval
+ puts "[timestamp -c] $locker sleeping"
+ # We used to sleep 1 to $sleepint seconds. This makes the test
+ # run for hours. Instead, make it sleep for 10 to $sleepint * 100
+ # milliseconds, for a maximum sleep time of 0.5 s.
+ after [berkdb random_int 10 [expr $sleepint * 100]]
+ puts "[timestamp -c] $locker awake"
+
+ # Now release locks
+ puts "[timestamp -c] $locker released locks"
+
+ # Delete our locking flag files, then reverify. (Note that the
+ # locking flag verification function assumes that our own lock
+ # is not currently flagged.)
+ foreach pair $objlist {
+ set obj [lindex $pair 0]
+ set rw [lindex $pair 1]
+ lock003_destroy $obj
+ lock003_vrfy $rw $obj
+ }
+
+ release_list $locklist
+ flush stdout
+}
+
+set ret [$e close]
+error_check_good env_close $ret 0
+
+puts "[timestamp -c] $locker Complete"
+flush stdout
+
+exit
diff --git a/db-4.8.30/test/log001.tcl b/db-4.8.30/test/log001.tcl
new file mode 100644
index 0000000..2fb66f5
--- /dev/null
+++ b/db-4.8.30/test/log001.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+
+# TEST log001
+# TEST Read/write log records.
+# TEST Test with and without fixed-length, in-memory logging,
+# TEST and encryption.
+proc log001 { } {
+ global passwd
+ global has_crypto
+ global rand_init
+
+ berkdb srand $rand_init
+ set iter 1000
+
+ set max [expr 1024 * 128]
+ foreach fixedlength { 0 1 } {
+ foreach inmem { 1 0 } {
+ log001_body $max $iter $fixedlength $inmem
+ log001_body $max [expr $iter * 15] $fixedlength $inmem
+
+ # Skip encrypted tests if not supported.
+ if { $has_crypto == 0 } {
+ continue
+ }
+ log001_body $max\
+ $iter $fixedlength $inmem "-encryptaes $passwd"
+ log001_body $max\
+ [expr $iter * 15] $fixedlength $inmem "-encryptaes $passwd"
+ }
+ }
+}
+
+proc log001_body { max nrecs fixedlength inmem {encargs ""} } {
+ source ./include.tcl
+
+ puts -nonewline "Log001: Basic put/get log records: "
+ if { $fixedlength == 1 } {
+ puts -nonewline "fixed-length ($encargs)"
+ } else {
+ puts -nonewline "variable-length ($encargs)"
+ }
+
+ # In-memory logging requires a large enough log buffer that
+ # any active transaction can be aborted.
+ if { $inmem == 1 } {
+ set lbuf [expr 8 * [expr 1024 * 1024]]
+ puts " with in-memory logging."
+ } else {
+ puts " with on-disk logging."
+ }
+
+ env_cleanup $testdir
+
+ set logargs ""
+ if { $inmem == 1 } {
+ set logargs "-log_inmemory -log_buffer $lbuf"
+ }
+ set env [eval {berkdb_env -log -create -home $testdir -mode 0644} \
+ $encargs $logargs -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # We will write records to the log and make sure we can
+ # read them back correctly. We'll use a standard pattern
+ # repeated some number of times for each record.
+ set lsn_list {}
+ set rec_list {}
+ puts "\tLog001.a: Writing $nrecs log records"
+ for { set i 0 } { $i < $nrecs } { incr i } {
+ set rec ""
+ for { set j 0 } { $j < [expr $i % 10 + 1] } {incr j} {
+ set rec $rec$i:logrec:$i
+ }
+ if { $fixedlength != 1 } {
+ set rec $rec:[random_data 237 0 0]
+ }
+ set lsn [$env log_put $rec]
+ error_check_bad log_put [is_substr $lsn log_cmd] 1
+ lappend lsn_list $lsn
+ lappend rec_list $rec
+ }
+
+ # Open a log cursor.
+ set logc [$env log_cursor]
+ error_check_good logc [is_valid_logc $logc $env] TRUE
+
+ puts "\tLog001.b: Retrieving log records sequentially (forward)"
+ set i 0
+ for { set grec [$logc get -first] } { [llength $grec] != 0 } {
+ set grec [$logc get -next]} {
+ error_check_good log_get:seq [lindex $grec 1] \
+ [lindex $rec_list $i]
+ incr i
+ }
+
+ puts "\tLog001.c: Retrieving log records sequentially (backward)"
+ set i [llength $rec_list]
+ for { set grec [$logc get -last] } { [llength $grec] != 0 } {
+ set grec [$logc get -prev] } {
+ incr i -1
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ }
+
+ puts "\tLog001.d: Retrieving log records sequentially by LSN"
+ set i 0
+ foreach lsn $lsn_list {
+ set grec [$logc get -set $lsn]
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ incr i
+ }
+
+ puts "\tLog001.e: Retrieving log records randomly by LSN"
+ set m [expr [llength $lsn_list] - 1]
+ for { set i 0 } { $i < $nrecs } { incr i } {
+ set recno [berkdb random_int 0 $m ]
+ set lsn [lindex $lsn_list $recno]
+ set grec [$logc get -set $lsn]
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $recno]
+ }
+
+ puts "\tLog001.f: Retrieving first/current, last/current log record"
+ set grec [$logc get -first]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
+ set grec [$logc get -current]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
+ set i [expr [llength $rec_list] - 1]
+ set grec [$logc get -last]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ set grec [$logc get -current]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+}
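
The round trip Log001 verifies reduces to a small pattern: log_put returns an LSN, and a log cursor positioned with -set on that LSN returns the record. A condensed sketch, assuming an already-open logging environment in $env as created above:

set lsn [$env log_put "my record"]
set logc [$env log_cursor]
set rec [$logc get -set $lsn]        ;# returns an {lsn record} pair
puts "read back: [lindex $rec 1]"
error_check_good logc_close [$logc close] 0
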
diff --git a/db-4.8.30/test/log002.tcl b/db-4.8.30/test/log002.tcl
new file mode 100644
index 0000000..0b25cf8
--- /dev/null
+++ b/db-4.8.30/test/log002.tcl
@@ -0,0 +1,101 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST log002
+# TEST Tests multiple logs
+# TEST Log truncation
+# TEST LSN comparison and file functionality.
+proc log002 { } {
+ global rand_init
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+
+ foreach inmem { 1 0 } {
+ log002_body $inmem
+ }
+}
+
+proc log002_body { inmem } {
+ source ./include.tcl
+
+ puts "Log002: Multiple log test w/trunc, file, compare functionality"
+
+ env_cleanup $testdir
+
+ set max [expr 1024 * 128]
+
+ set logargs ""
+ if { $inmem == 0 } {
+ puts "Log002: Using on-disk logging."
+ } else {
+ puts "Log002: Using in-memory logging."
+ set lbuf [expr 8 * [expr 1024 * 1024]]
+ set logargs "-log_inmemory -log_buffer $lbuf"
+ }
+ set env [eval {berkdb_env} -create -home $testdir -log \
+ -mode 0644 $logargs -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # We'll record every hundredth record for later use
+ set info_list {}
+
+ puts "\tLog002.a: Writing log records"
+ set i 0
+ for {set s 0} { $s < [expr 3 * $max] } { incr s $len } {
+ set rec [random_data 120 0 0]
+ set len [string length $rec]
+ set lsn [$env log_put $rec]
+
+ if { [expr $i % 100 ] == 0 } {
+ lappend info_list [list $lsn $rec]
+ }
+ incr i
+ }
+
+ puts "\tLog002.b: Checking log_compare"
+ set last {0 0}
+ foreach p $info_list {
+ set l [lindex $p 0]
+ if { [llength $last] != 0 } {
+ error_check_good \
+ log_compare [$env log_compare $l $last] 1
+ error_check_good \
+ log_compare [$env log_compare $last $l] -1
+ error_check_good \
+ log_compare [$env log_compare $l $l] 0
+ }
+ set last $l
+ }
+
+ puts "\tLog002.c: Checking log_file"
+ if { $inmem == 0 } {
+ set flist [glob $testdir/log*]
+ foreach p $info_list {
+ set lsn [lindex $p 0]
+ set f [$env log_file $lsn]
+
+ # Change backslash separators on Windows to forward
+ # slashes, as the rest of the test suite expects.
+ regsub -all {\\} $f {/} f
+ error_check_bad log_file:$f [lsearch $flist $f] -1
+ }
+ }
+
+ puts "\tLog002.d: Verifying records"
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ for {set i [expr [llength $info_list] - 1] } { $i >= 0 } { incr i -1} {
+ set p [lindex $info_list $i]
+ set grec [$logc get -set [lindex $p 0]]
+ error_check_good log_get:$env [lindex $grec 1] [lindex $p 1]
+ }
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+}
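
Log002.b relies on log_compare behaving as a three-way comparison over LSNs from the same environment. A short sketch of that contract (the open environment in $env and the write order of the two records are assumptions matching the setup above):

set lsn1 [$env log_put "first record"]
set lsn2 [$env log_put "second record"]
error_check_good earlier [$env log_compare $lsn1 $lsn2] -1
error_check_good later [$env log_compare $lsn2 $lsn1] 1
error_check_good same [$env log_compare $lsn1 $lsn1] 0
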
diff --git a/db-4.8.30/test/log003.tcl b/db-4.8.30/test/log003.tcl
new file mode 100644
index 0000000..562db72
--- /dev/null
+++ b/db-4.8.30/test/log003.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST log003
+# TEST Verify that log_flush is flushing records correctly.
+proc log003 { } {
+ global rand_init
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+
+ # Even though log_flush doesn't do anything for in-memory
+ # logging, we want to make sure calling it doesn't break
+ # anything.
+ foreach inmem { 1 0 } {
+ log003_body $inmem
+ }
+}
+
+proc log003_body { inmem } {
+ source ./include.tcl
+
+ puts -nonewline "Log003: Verify log_flush behavior"
+ if { $inmem == 0 } {
+ puts " (on-disk logging)."
+ } else {
+ puts " (in-memory logging)."
+ }
+
+ set max [expr 1024 * 128]
+ env_cleanup $testdir
+ set short_rec "abcdefghijklmnopqrstuvwxyz"
+ set long_rec [repeat $short_rec 200]
+ set very_long_rec [repeat $long_rec 4]
+
+ foreach rec "$short_rec $long_rec $very_long_rec" {
+ puts "\tLog003.a: Verify flush on [string length $rec] byte rec"
+
+ set logargs ""
+ if { $inmem == 1 } {
+ set logargs "-log_inmemory -log_buffer [expr $max * 2]"
+ }
+ set env [eval {berkdb_env} -log -home $testdir -create \
+ -mode 0644 $logargs -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set lsn [$env log_put $rec]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ set ret [$env log_flush $lsn]
+ error_check_good log_flush $ret 0
+
+ # Now, we want to crash the region and recheck. Closing the
+ # log does not flush any records, so we'll use a close to
+ # do the "crash"
+ set ret [$env close]
+ error_check_good log_env:close $ret 0
+
+ # Now, remove the log region
+ #set ret [berkdb envremove -home $testdir]
+ #error_check_good env:remove $ret 0
+
+ # Re-open the log and try to read the record.
+ set env [berkdb_env -create -home $testdir \
+ -log -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set gotrec [$logc get -first]
+ error_check_good lp_get [lindex $gotrec 1] $rec
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ log_cleanup $testdir
+ }
+
+ if { $inmem == 1 } {
+ puts "Log003: Skipping remainder of test for in-memory logging."
+ return
+ }
+
+ foreach rec "$short_rec $long_rec $very_long_rec" {
+ puts "\tLog003.b: \
+ Verify flush on non-last record [string length $rec]"
+
+ set env [berkdb_env -log -home $testdir \
+ -create -mode 0644 -log_max $max]
+
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # Put 10 random records
+ for { set i 0 } { $i < 10 } { incr i} {
+ set r [random_data 450 0 0]
+ set lsn [$env log_put $r]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ }
+
+ # Put the record we are interested in
+ set save_lsn [$env log_put $rec]
+ error_check_bad log_put [lindex $save_lsn 0] "ERROR:"
+
+ # Put 10 more random records
+ for { set i 0 } { $i < 10 } { incr i} {
+ set r [random_data 450 0 0]
+ set lsn [$env log_put $r]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ }
+
+ # Now check the flush
+ set ret [$env log_flush $save_lsn]
+ error_check_good log_flush $ret 0
+
+ # Now, we want to crash the region and recheck. Closing the
+ # log does not flush any records, so we'll use a close to
+ # do the "crash".
+ #
+ # Now, close and remove the log region
+ error_check_good env:close:$env [$env close] 0
+ set ret [berkdb envremove -home $testdir]
+ error_check_good env:remove $ret 0
+
+ # Re-open the log and try to read the record.
+ set env [berkdb_env -log -home $testdir \
+ -create -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set gotrec [$logc get -set $save_lsn]
+ error_check_good lp_get [lindex $gotrec 1] $rec
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ log_cleanup $testdir
+ }
+}
diff --git a/db-4.8.30/test/log004.tcl b/db-4.8.30/test/log004.tcl
new file mode 100644
index 0000000..573cf59
--- /dev/null
+++ b/db-4.8.30/test/log004.tcl
@@ -0,0 +1,51 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+
+# TEST log004
+# TEST Make sure that if we do PREVs on a log, but the beginning of the
+# TEST log has been truncated, we do the right thing.
+proc log004 { } {
+ foreach inmem { 1 0 } {
+ log004_body $inmem
+ }
+}
+
+proc log004_body { inmem } {
+ source ./include.tcl
+
+ puts "Log004: Prev on log when beginning of log has been truncated."
+ # Use archive test to populate log
+ env_cleanup $testdir
+ puts "\tLog004.a: Call archive to populate log."
+ archive $inmem
+
+ # Delete all log files under 100
+ puts "\tLog004.b: Delete all log files under 100."
+ set ret [catch { glob $testdir/log.00000000* } result]
+ if { $ret == 0 } {
+ eval fileremove -f $result
+ }
+
+ # Now open the log and get the first record and try a prev
+ puts "\tLog004.c: Open truncated log, attempt to access missing portion."
+ set env [berkdb_env -create -log -home $testdir]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set ret [$logc get -first]
+ error_check_bad log_get [llength $ret] 0
+
+ # This should give DB_NOTFOUND which is a ret of length 0
+ catch {$logc get -prev} ret
+ error_check_good log_get_prev [string length $ret] 0
+
+ puts "\tLog004.d: Close log and environment."
+ error_check_good log_cursor_close [$logc close] 0
+ error_check_good log_close [$env close] 0
+}
diff --git a/db-4.8.30/test/log005.tcl b/db-4.8.30/test/log005.tcl
new file mode 100644
index 0000000..1f0639d
--- /dev/null
+++ b/db-4.8.30/test/log005.tcl
@@ -0,0 +1,117 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST log005
+# TEST Check that log file sizes can change on the fly.
+proc log005 { } {
+
+ # Skip the test for HP-UX, where we can't do the second
+ # env open.
+ global is_hp_test
+ if { $is_hp_test == 1 } {
+ puts "Log005: Skipping for HP-UX."
+ return
+ }
+
+ foreach inmem { 1 0 } {
+ log005_body $inmem
+ }
+}
+proc log005_body { inmem } {
+ source ./include.tcl
+ env_cleanup $testdir
+
+ puts -nonewline "Log005: Check that log file sizes can change"
+ if { $inmem == 0 } {
+ puts " (on-disk logging)."
+ } else {
+ puts " (in-memory logging)."
+ }
+
+ # Open the environment, set and check the log file size.
+ puts "\tLog005.a: open, set and check the log file size."
+ set logargs ""
+ if { $inmem == 1 } {
+ set lbuf [expr 1024 * 1024]
+ set logargs "-log_inmemory -log_buffer $lbuf"
+ }
+ set env [eval {berkdb_env} -create -home $testdir \
+ $logargs -log_max 1000000 -txn]
+ error_check_good envopen [is_valid_env $env] TRUE
+ set db [berkdb_open \
+ -env $env -create -mode 0644 -btree -auto_commit a.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Get the current log file maximum.
+ set max [log005_stat $env "Current log file size"]
+ error_check_good max_set $max 1000000
+
+ # Reset the log file size using a second open, and make sure
+ # it changes.
+ puts "\tLog005.b: reset during open, check the log file size."
+ set envtmp [berkdb_env -home $testdir -log_max 900000 -txn]
+ error_check_good envtmp_open [is_valid_env $envtmp] TRUE
+ error_check_good envtmp_close [$envtmp close] 0
+
+ set tmp [log005_stat $env "Current log file size"]
+ error_check_good max_changed 900000 $tmp
+
+ puts "\tLog005.c: fill in the current log file size."
+ # Fill in the current log file.
+ set new_lsn 0
+ set data [repeat "a" 1024]
+ for { set i 1 } \
+ { [log005_stat $env "Current log file number"] != 2 } \
+ { incr i } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set ret [$db put -txn $t $i $data]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ set last_lsn $new_lsn
+ set new_lsn [log005_stat $env "Current log file offset"]
+ }
+
+ # The last LSN in the first file should be more than our new
+ # file size.
+ error_check_good "lsn check < 900000" [expr 900000 < $last_lsn] 1
+
+ # Close down the environment.
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ if { $inmem == 1 } {
+ puts "Log005: Skipping remainder of test for in-memory logging."
+ return
+ }
+
+ puts "\tLog005.d: check the log file size is unchanged after recovery."
+ # Open again, running recovery. Verify the log file size is as we
+ # left it.
+ set env [berkdb_env -create -home $testdir -recover -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set tmp [log005_stat $env "Current log file size"]
+ error_check_good after_recovery 900000 $tmp
+
+ error_check_good env_close [$env close] 0
+}
+
+# log005_stat --
+# Return the current log statistics.
+proc log005_stat { env s } {
+ set stat [$env log_stat]
+ foreach statpair $stat {
+ set statmsg [lindex $statpair 0]
+ set statval [lindex $statpair 1]
+ if {[is_substr $statmsg $s] != 0} {
+ return $statval
+ }
+ }
+ puts "FAIL: log005: stat string $s not found"
+ return 0
+}
diff --git a/db-4.8.30/test/log006.tcl b/db-4.8.30/test/log006.tcl
new file mode 100644
index 0000000..47ef091
--- /dev/null
+++ b/db-4.8.30/test/log006.tcl
@@ -0,0 +1,230 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST log006
+# TEST Test log file auto-remove.
+# TEST Test normal operation.
+# TEST Test a long-lived txn.
+# TEST Test log_archive flags.
+# TEST Test db_archive flags.
+# TEST Test turning on later.
+# TEST Test setting via DB_CONFIG.
+proc log006 { } {
+ source ./include.tcl
+
+ puts "Log006: Check auto-remove of log files."
+ env_cleanup $testdir
+
+ # Open the environment, set auto-remove flag. Use smaller log
+ # files to make more of them.
+ puts "\tLog006.a: open environment, populate database."
+ set lbuf 16384
+ set lmax 65536
+ set env [berkdb_env_noerr -log_remove \
+ -create -home $testdir -log_buffer $lbuf -log_max $lmax -txn]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ log006_put $testdir $env
+
+ #
+	# Check log files. Using the small log file size, we should
+	# have made a lot of log files. Check that we have only a few left.
+ # Dividing by 5 tests that at least 80% of the files are gone.
+ #
+ set log_number [stat_field $env log_stat "Current log file number"]
+ set log_expect [expr $log_number / 5]
+
+ puts "\tLog006.b: Check log files removed."
+ set lfiles [glob -nocomplain $testdir/log.*]
+ set remlen [llength $lfiles]
+ error_check_good lfiles_len [expr $remlen < $log_expect] 1
+ error_check_good lfiles [lsearch $lfiles $testdir/log.0000000001] -1
+ # Save last log file for later check.
+ # Files may not be sorted, sort them and then save the last filename.
+ set oldfile [lindex [lsort -ascii $lfiles] end]
+
+ # Rerun log006_put with a long lived txn.
+ #
+ puts "\tLog006.c: Rerun put loop with long-lived transaction."
+ cleanup $testdir $env
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+
+ # Give the txn something to do so no files can be removed.
+ set testfile temp.db
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
+ -env $env -txn $txn -pagesize 8192 -btree $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ log006_put $testdir $env
+
+ puts "\tLog006.d: Check log files not removed."
+ set lfiles [glob -nocomplain $testdir/log.*]
+ error_check_good lfiles2_len [expr [llength $lfiles] > $remlen] 1
+ set lfiles [lsort -ascii $lfiles]
+ error_check_good lfiles_chk [lsearch $lfiles $oldfile] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good ckp1 [$env txn_checkpoint] 0
+ error_check_good ckp2 [$env txn_checkpoint] 0
+
+ puts "\tLog006.e: Run log_archive with -auto_remove flag."
+ # When we're done, only the last log file should remain.
+ set lfiles [glob -nocomplain $testdir/log.*]
+ set oldfile [lindex [lsort -ascii $lfiles] end]
+
+ # First, though, verify mutual-exclusiveness of flag.
+ foreach f {-arch_abs -arch_data -arch_log} {
+ set stat [catch {eval $env log_archive -arch_remove $f} ret]
+ error_check_good stat $stat 1
+ error_check_good la:$f:fail [is_substr $ret "illegal flag"] 1
+ }
+ # Now run it for real.
+ set stat [catch {$env log_archive -arch_remove} ret]
+ error_check_good stat $stat 0
+
+ puts "\tLog006.f: Check only $oldfile remains."
+ set lfiles [glob -nocomplain $testdir/log.*]
+ error_check_good 1log [llength $lfiles] 1
+ error_check_good lfiles_chk [lsearch $lfiles $oldfile] 0
+
+ puts "\tLog006.g: Rerun put loop with long-lived transaction."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ log006_put $testdir $env
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good ckp1 [$env txn_checkpoint] 0
+ error_check_good ckp2 [$env txn_checkpoint] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Test db_archive's auto-remove flag.
+ # After we are done, only the last log file should be there.
+ # First check that the delete flag cannot be used with any
+ # of the other flags.
+ #
+ puts "\tLog006.h: Run db_archive with delete flag."
+ set lfiles [glob -nocomplain $testdir/log.*]
+ set oldfile [lindex [lsort -ascii $lfiles] end]
+ #
+ # Again, first check illegal flag combinations with db_archive.
+ #
+ foreach f {-a -l -s} {
+ set stat [catch {exec $util_path/db_archive $f -d -h $testdir} \
+ ret]
+ error_check_good stat $stat 1
+ error_check_good la:fail [is_substr $ret "illegal flag"] 1
+ }
+ set stat [catch {exec $util_path/db_archive -d -h $testdir} ret]
+ error_check_good stat $stat 0
+
+ puts "\tLog006.i: Check only $oldfile remains."
+ set lfiles [glob -nocomplain $testdir/log.*]
+ error_check_good 1log [llength $lfiles] 1
+ error_check_good lfiles_chk [lsearch $lfiles $oldfile] 0
+
+ #
+ # Now rerun some parts with other env settings tested.
+ #
+ env_cleanup $testdir
+
+ # First test that the option can be turned on later.
+ # 1. Open env w/o auto-remove.
+ # 2. Run log006_put.
+ # 3. Verify log files all there.
+ # 4. Call env set_flags to turn it on.
+ # 5. Run log006_put.
+ # 6. Verify log files removed.
+ puts "\tLog006.j: open environment w/o auto remove, populate database."
+ set env [berkdb_env -recover \
+ -create -home $testdir -log_buffer $lbuf -log_max $lmax -txn]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ log006_put $testdir $env
+
+ puts "\tLog006.k: Check log files not removed."
+ set lfiles [glob -nocomplain $testdir/log.*]
+ error_check_good lfiles2_len [expr [llength $lfiles] > $remlen] 1
+ set lfiles [lsort -ascii $lfiles]
+ error_check_good lfiles [lsearch $lfiles $testdir/log.0000000001] 0
+
+ puts "\tLog006.l: turn on auto remove and repopulate database."
+ error_check_good sf [$env log_config "autoremove on"] 0
+
+ log006_put $testdir $env
+
+ puts "\tLog006.m: Check log files removed."
+ set lfiles [glob -nocomplain $testdir/log.*]
+ error_check_good lfiles_len [expr [llength $lfiles] < $log_expect] 1
+ error_check_good lfiles [lsearch $lfiles $testdir/log.0000000001] -1
+ error_check_good env_close [$env close] 0
+
+ #
+ # Configure via DB_CONFIG.
+ #
+ env_cleanup $testdir
+
+ puts "\tLog006.n: Test setting via DB_CONFIG."
+ # Open the environment, w/o remove flag, but DB_CONFIG.
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_log_config db_log_auto_remove"
+ close $cid
+ set env [berkdb_env -recover \
+ -create -home $testdir -log_buffer $lbuf -log_max $lmax -txn]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ log006_put $testdir $env
+
+ puts "\tLog006.o: Check log files removed."
+ set lfiles [glob -nocomplain $testdir/log.*]
+ error_check_good lfiles_len [expr [llength $lfiles] < $log_expect] 1
+ error_check_good lfiles [lsearch $lfiles $testdir/log.0000000001] -1
+ error_check_good env_close [$env close] 0
+
+}
+
+#
+# Modified from test003.
+#
+proc log006_put { testdir env } {
+ set testfile log006.db
+ #
+ # Specify a pagesize so we can control how many log files
+ # are created and left over.
+ #
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
+ -env $env -auto_commit -pagesize 8192 -btree $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set lmax [$env get_lg_max]
+ set file_list [get_file_list]
+ set count 0
+ foreach f $file_list {
+ if { [string compare [file type $f] "file"] != 0 } {
+ continue
+ }
+ set key $f
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ # Read in less than the maximum log size.
+ set data [read $fid [expr $lmax - [expr $lmax / 8]]]
+ close $fid
+
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+ if { $count % 10 == 0 } {
+ error_check_good ckp($count) [$env txn_checkpoint] 0
+ }
+
+ incr count
+ }
+ error_check_good db_close [$db close] 0
+}
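
The removal that Log006.e drives can be requested directly once a checkpoint has made earlier log files unnecessary for recovery. A sketch using only the calls that appear above (an open transactional environment in $env is assumed):

error_check_good ckp [$env txn_checkpoint] 0
set stat [catch {$env log_archive -arch_remove} ret]
error_check_good arch_remove $stat 0
# Command-line equivalent, as exercised in Log006.h:
#   db_archive -d -h $testdir
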
diff --git a/db-4.8.30/test/log007.tcl b/db-4.8.30/test/log007.tcl
new file mode 100644
index 0000000..7c5bda4
--- /dev/null
+++ b/db-4.8.30/test/log007.tcl
@@ -0,0 +1,110 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST log007
+# TEST Test of in-memory logging bugs. [#11505]
+# TEST
+# TEST Test db_printlog with in-memory logs.
+#
+proc log007 { } {
+ global testdir
+ global util_path
+ set tnum "007"
+
+ puts "Log$tnum: Test in-memory logs with db_printlog."
+
+ # Log size is small so we quickly create more than one.
+ # Since we are in-memory the buffer is larger than the
+ # file size.
+ set pagesize 4096
+ append args " -pagesize $pagesize "
+ set log_max [expr $pagesize * 2]
+ set log_buf [expr $log_max * 2]
+
+ # We have 13-byte records. We want to fill slightly more
+ # than one virtual log file on each iteration. The first
+ # record always has an offset of 28.
+ #
+ set recsize 13
+ set recsperfile [expr [expr $log_max - 28] / $recsize]
+ set nrecs [expr $recsperfile + 1]
+
+ # Open environment.
+ env_cleanup $testdir
+ set flags " -create -txn -home $testdir \
+ -log_inmemory -log_buffer $log_buf -log_max $log_max"
+ set env [eval {berkdb_env} $flags]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set iter 15
+ set lastfile 1
+ for { set i 0 } { $i < $iter } { incr i } {
+ puts "\tLog$tnum.a.$i: Writing $nrecs 13-byte log records."
+ set lsn_list {}
+ for { set j 0 } { $j < $nrecs } { incr j } {
+ set rec "1"
+ # Make the first record one byte larger for each
+ # successive log file so we hit the end of the
+ # log file at each of the 13 possibilities.
+ set nentries [expr [expr $i * $nrecs] + $j]
+ if { [expr $nentries % 628] == 0 } {
+ append firstrec a
+ set ret [$env log_put $firstrec]
+ } else {
+ set ret [$env log_put $rec]
+ }
+ error_check_bad log_put [is_substr $ret log_cmd] 1
+ lappend lsn_list $ret
+ }
+
+ # Open a log cursor.
+ set m_logc [$env log_cursor]
+ error_check_good m_logc [is_valid_logc $m_logc $env] TRUE
+
+ # Check that we're in the expected virtual log file.
+ set first [$m_logc get -first]
+ error_check_good first_lsn [lindex $first 0] "[expr $i + 1] 28"
+ set last [$m_logc get -last]
+
+ puts "\tLog$tnum.b.$i: Read log records sequentially."
+ set j 0
+ for { set logrec [$m_logc get -first] } \
+ { [llength $logrec] != 0 } \
+ { set logrec [$m_logc get -next]} {
+ set file [lindex [lindex $logrec 0] 0]
+ if { $file != $lastfile } {
+ # We have entered a new virtual log file.
+ set lastfile $file
+ }
+ set offset [lindex [lindex $logrec 0] 1]
+ set lsn($j) "\[$file\]\[$offset\]"
+ incr j
+ }
+ error_check_good cursor_close [$m_logc close] 0
+
+ puts "\tLog$tnum.c.$i: Compare printlog to log records."
+ set stat [catch {eval exec $util_path/db_printlog \
+ -h $testdir > $testdir/prlog} result]
+ error_check_good stat_prlog $stat 0
+
+ # Make sure the results of printlog contain all the same
+ # LSNs we saved when walking the files with the log cursor.
+ set j 0
+ set fd [open $testdir/prlog r]
+ while { [gets $fd record] >= 0 } {
+ # A log record begins with "[".
+ if { [string match {\[*} $record] == 1 } {
+ error_check_good \
+ check_prlog [is_substr $record $lsn($j)] 1
+ incr j
+ }
+ }
+ close $fd
+ }
+
+ error_check_good env_close [$env close] 0
+ error_check_good env_remove [berkdb envremove -home $testdir] 0
+}
diff --git a/db-4.8.30/test/log008.tcl b/db-4.8.30/test/log008.tcl
new file mode 100644
index 0000000..afa7968
--- /dev/null
+++ b/db-4.8.30/test/log008.tcl
@@ -0,0 +1,46 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST log008
+# TEST Test what happens if a txn_ckp record falls into a
+# TEST different log file than the DBREG_CKP records generated
+# TEST by the same checkpoint.
+
+proc log008 { { nhandles 100 } args } {
+ source ./include.tcl
+ set tnum "008"
+
+ puts "Log$tnum: Checkpoint test with records spanning log files."
+ env_cleanup $testdir
+
+ # Set up env command for use later.
+ set envcmd "berkdb_env -create -txn -home $testdir"
+
+ # Start up a child process which will open a bunch of handles
+ # on a database and write to it, running until it creates a
+ # checkpoint with records spanning two log files.
+ puts "\tLog$tnum.a: Spawning child tclsh."
+ set pid [exec $tclsh_path $test_path/wrap.tcl \
+ log008script.tcl $testdir/log008script.log $nhandles &]
+
+ watch_procs $pid 3
+
+ puts "\tLog$tnum.b: Child is done."
+
+ # Join the env with recovery. This ought to work.
+ puts "\tLog$tnum.c: Join abandoned child env with recovery."
+ set env [eval $envcmd -recover]
+
+ # Clean up.
+ error_check_good env_close [$env close] 0
+
+ # Check log file for failures.
+ set errstrings [eval findfail $testdir/log008script.log]
+ foreach str $errstrings {
+ puts "FAIL: error message in log008 log file: $str"
+ }
+}
+
diff --git a/db-4.8.30/test/log008script.tcl b/db-4.8.30/test/log008script.tcl
new file mode 100644
index 0000000..bffa5e9
--- /dev/null
+++ b/db-4.8.30/test/log008script.tcl
@@ -0,0 +1,82 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Log008 script - dbreg_ckp and txn_ckp records spanning log files.
+#
+# Usage: log008script
+
+source ./include.tcl
+set tnum "008"
+set usage "log008script nhandles"
+
+# Verify usage
+if { $argc != 1 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set nhandles [ lindex $argv 0 ]
+
+# We make the log files small so it's likely that the
+# records will end up in different files.
+set maxbsize [expr 8 * 1024]
+set maxfile [expr 32 * 1024]
+
+# Set up environment.
+set envcmd "berkdb_env -create -txn -home $testdir \
+ -log_buffer $maxbsize -log_max $maxfile"
+set dbenv [eval $envcmd]
+error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+# Open a lot of database handles.
+set filename TESTFILE
+set handlelist {}
+for { set i 0 } { $i < $nhandles } { incr i } {
+ set db [berkdb_open \
+ -create -env $dbenv -auto_commit -btree $filename]
+ lappend handlelist $db
+}
+
+# Fill log files, checking LSNs before and after a checkpoint,
+# until we generate a case where the records span two log files.
+set i 0
+while { 1 } {
+ set txn [$dbenv txn]
+ foreach handle $handlelist {
+ error_check_good \
+ db_put [$handle put -txn $txn key.$i data.$i] 0
+ incr i
+ }
+ error_check_good txn_commit [$txn commit] 0
+
+ # Find current LSN file number.
+ set filenum [stat_field $dbenv log_stat "Current log file number"]
+
+ # Checkpoint.
+ error_check_good checkpoint [$dbenv txn_checkpoint] 0
+
+ # Find current LSN.
+ set newfilenum [stat_field $dbenv log_stat "Current log file number"]
+ if { [expr $newfilenum > $filenum] } {
+ break
+ }
+}
+
+# Do one more transactional operation per fileid.
+set txn [$dbenv txn]
+foreach handle $handlelist {
+ error_check_good \
+ db_put [$handle put -txn $txn key.$i data.$i] 0
+ incr i
+}
+error_check_good txn_commit [$txn commit] 0
+
+# Archive, deleting the log files we think we no longer need.
+set stat [eval exec $util_path/db_archive -d -h $testdir]
+
+# Child is done. Exit, abandoning the env instead of closing it.
+exit
diff --git a/db-4.8.30/test/log009.tcl b/db-4.8.30/test/log009.tcl
new file mode 100644
index 0000000..29211db
--- /dev/null
+++ b/db-4.8.30/test/log009.tcl
@@ -0,0 +1,122 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST log009
+# TEST Test of logging and getting log file version information.
+# TEST Each time we cross a log file boundary verify we can
+# TEST get the version via the log cursor.
+# TEST Do this both forward and backward.
+#
+proc log009 { } {
+ source ./include.tcl
+ global errorInfo
+
+ env_cleanup $testdir
+ set niter 200
+ set method btree
+
+ puts "Log009: Retrieve log version using log cursor."
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_buf [expr $pagesize * 2]
+ set log_max [expr $log_buf * 4]
+
+ # Open an env.
+ set envcmd "berkdb_env_noerr -create \
+ -log_buffer $log_buf -log_max $log_max -txn -home $testdir"
+ set env [eval $envcmd]
+ error_check_good env [is_valid_env $env] TRUE
+
+ set stop 0
+ set start 0
+ #
+ # Loop until we have at least 3 log files.
+ #
+ while { $stop == 0 } {
+ puts "\tLog009.a: Running test to generate log files."
+ eval rep_test \
+ $method $env NULL $niter $start $start 0 0 $largs
+ incr start $niter
+
+ set last_log [get_logfile $env last]
+ if { $last_log >= 3 } {
+ set stop 1
+ }
+ }
+
+ # We now have at least 3 log files. Walk a cursor both ways
+ # through the log and make sure we can get the version when we
+ # cross a log file boundary.
+ set curfile 0
+ set logc [$env log_cursor]
+ error_check_good logc [is_valid_logc $logc $env] TRUE
+
+ puts "\tLog009.b: Try to get version on unset cursor."
+ set stat [catch {eval $logc version} ret]
+ error_check_bad stat $stat 0
+ error_check_good err [is_substr $ret "unset cursor"] 1
+
+ # Check walking forward through logs looking for log
+ # file boundaries.
+ #
+ puts "\tLog009.c: Walk log forward checking persist."
+ for { set logrec [$logc get -first] } \
+ { [llength $logrec] != 0 } \
+ { set logrec [$logc get -next] } {
+ set lsn [lindex $logrec 0]
+ set lsnfile [lindex $lsn 0]
+ if { $curfile != $lsnfile } {
+ log009_check $logc $logrec
+ set curfile $lsnfile
+ }
+ }
+ error_check_good logclose [$logc close] 0
+
+ set curfile 0
+ set logc [$env log_cursor]
+ error_check_good logc [is_valid_logc $logc $env] TRUE
+ #
+ # Check walking backward through logs looking for log
+ # file boundaries.
+ #
+ puts "\tLog009.d: Walk log backward checking persist."
+ for { set logrec [$logc get -last] } \
+ { [llength $logrec] != 0 } \
+ { set logrec [$logc get -prev] } {
+ set lsn [lindex $logrec 0]
+ set lsnfile [lindex $lsn 0]
+ if { $curfile != $lsnfile } {
+ log009_check $logc $logrec
+ set curfile $lsnfile
+ }
+ }
+ error_check_good logclose [$logc close] 0
+ error_check_good env_close [$env close] 0
+}
+
+proc log009_check { logc logrec } {
+ set version [$logc version]
+ #
+ # We don't have ready access to the current log
+ # version, but make sure it is something reasonable.
+ #
+ # !!!
+ # The first readable log version is 8, and the current log
+ # version is still well below 20, so that range is a safe check.
+ #
+ set reasonable [expr $version > 7 && $version < 20]
+ error_check_good persist $reasonable 1
+ #
+ # Verify that getting the version doesn't move
+ # or change the log cursor in any way.
+ #
+ set logrec1 [$logc get -current]
+ error_check_good current $logrec $logrec1
+}
diff --git a/db-4.8.30/test/logtrack.list b/db-4.8.30/test/logtrack.list
new file mode 100644
index 0000000..8b31b3f
--- /dev/null
+++ b/db-4.8.30/test/logtrack.list
@@ -0,0 +1,60 @@
+PREFIX __crdel
+BEGIN metasub 42 142
+BEGIN inmem_create 44 138
+BEGIN inmem_rename 44 139
+BEGIN inmem_remove 44 140
+PREFIX __db
+BEGIN addrem 42 41
+BEGIN big 42 43
+BEGIN ovref 42 44
+BEGIN debug 42 47
+BEGIN noop 42 48
+BEGIN pg_alloc 43 49
+BEGIN pg_free 43 50
+BEGIN cksum 42 51
+BEGIN pg_freedata 43 52
+BEGIN pg_init 43 60
+BEGIN pg_trunc 49 66
+PREFIX __dbreg
+BEGIN register 42 2
+PREFIX __bam
+BEGIN split 48 62
+BEGIN rsplit 42 63
+BEGIN adj 42 55
+BEGIN cadjust 42 56
+BEGIN cdel 42 57
+BEGIN repl 42 58
+BEGIN root 42 59
+BEGIN curadj 42 64
+BEGIN rcuradj 42 65
+BEGIN relink 44 147
+BEGIN merge 47 148
+BEGIN pgno 44 149
+PREFIX __fop
+BEGIN create 48 143
+BEGIN remove 42 144
+BEGIN write 48 145
+BEGIN rename 48 146
+BEGIN file_remove 42 141
+PREFIX __ham
+BEGIN insdel 42 21
+BEGIN newpage 42 22
+BEGIN splitdata 42 24
+BEGIN replace 42 25
+BEGIN copypage 42 28
+BEGIN metagroup 43 29
+BEGIN groupalloc 43 32
+BEGIN curadj 42 33
+BEGIN chgpg 42 34
+PREFIX __qam
+BEGIN incfirst 42 84
+BEGIN mvptr 42 85
+BEGIN del 42 79
+BEGIN add 42 80
+BEGIN delext 42 83
+PREFIX __txn
+BEGIN regop 44 10
+BEGIN ckp 43 11
+BEGIN child 42 12
+BEGIN prepare 48 13
+BEGIN recycle 42 14
diff --git a/db-4.8.30/test/logtrack.tcl b/db-4.8.30/test/logtrack.tcl
new file mode 100644
index 0000000..a89aeda
--- /dev/null
+++ b/db-4.8.30/test/logtrack.tcl
@@ -0,0 +1,142 @@
+# See the file LICENSE for redistribution information
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# logtrack.tcl: A collection of routines, formerly implemented in Perl
+# as log.pl, to track which log record types the test suite hits.
+
+set ltsname "logtrack_seen.db"
+set ltlist $test_path/logtrack.list
+set tmpname "logtrack_tmp"
+
+proc logtrack_clean { } {
+ global ltsname
+
+ file delete -force $ltsname
+
+ return
+}
+
+proc logtrack_init { } {
+ global ltsname
+
+ logtrack_clean
+
+ # Create an empty tracking database.
+ [berkdb_open -create -truncate -btree $ltsname] close
+
+ return
+}
+
+# Dump the logs for directory dirname and record which log
+# records were seen.
+proc logtrack_read { dirname } {
+ global ltsname tmpname util_path
+ global encrypt passwd
+
+ set seendb [berkdb_open $ltsname]
+ error_check_good seendb_open [is_valid_db $seendb] TRUE
+
+ file delete -force $tmpname
+ set pargs " -N -h $dirname "
+ if { $encrypt > 0 } {
+ append pargs " -P $passwd "
+ }
+ set ret [catch {eval exec $util_path/db_printlog $pargs > $tmpname} res]
+ error_check_good printlog $ret 0
+ error_check_good tmpfile_exists [file exists $tmpname] 1
+
+ set f [open $tmpname r]
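+ # Each line of db_printlog output begins with two bracketed LSN
+ # fields followed by the record name and a colon, e.g. (illustrative)
+ # "[1][28]__db_addrem: ...".  The regexp below extracts the name.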
+ while { [gets $f record] >= 0 } {
+ set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
+ if { $r == 1 } {
+ error_check_good seendb_put [$seendb put $name ""] 0
+ }
+ }
+ close $f
+ file delete -force $tmpname
+
+ error_check_good seendb_close [$seendb close] 0
+}
+
+# Print the log record types that were seen but should not have been
+# seen and the log record types that were not seen but should have been seen.
+proc logtrack_summary { } {
+ global ltsname ltlist testdir
+ global one_test
+
+ set seendb [berkdb_open $ltsname]
+ error_check_good seendb_open [is_valid_db $seendb] TRUE
+ set existdb [berkdb_open -create -btree]
+ error_check_good existdb_open [is_valid_db $existdb] TRUE
+ set deprecdb [berkdb_open -create -btree]
+ error_check_good deprecdb_open [is_valid_db $deprecdb] TRUE
+
+ error_check_good ltlist_exists [file exists $ltlist] 1
+ set f [open $ltlist r]
+ set pref ""
+ while { [gets $f line] >= 0 } {
+ # Get the keyword, the first thing on the line:
+ # BEGIN/DEPRECATED/IGNORED/PREFIX
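+ # A logtrack.list line looks like "PREFIX __db" or
+ # "BEGIN addrem 42 41"; only the first two fields are used
+ # here, the trailing numbers are ignored.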
+ set keyword [lindex $line 0]
+
+ if { [string compare $keyword PREFIX] == 0 } {
+ # New prefix.
+ set pref [lindex $line 1]
+ } elseif { [string compare $keyword BEGIN] == 0 } {
+ # A log type we care about; put it on our list.
+
+ # Skip noop and debug.
+ if { [string compare [lindex $line 1] noop] == 0 } {
+ continue
+ }
+ if { [string compare [lindex $line 1] debug] == 0 } {
+ continue
+ }
+
+ error_check_good exist_put [$existdb put \
+ ${pref}_[lindex $line 1] ""] 0
+ } elseif { [string compare $keyword DEPRECATED] == 0 ||
+ [string compare $keyword IGNORED] == 0 } {
+ error_check_good deprec_put [$deprecdb put \
+ ${pref}_[lindex $line 1] ""] 0
+ }
+ }
+
+ error_check_good exist_curs \
+ [is_valid_cursor [set ec [$existdb cursor]] $existdb] TRUE
+ while { [llength [set dbt [$ec get -next]]] != 0 } {
+ set rec [lindex [lindex $dbt 0] 0]
+ if { [$seendb count $rec] == 0 && $one_test == "ALL" } {
+ if { $rec == "__db_pg_prepare" } {
+ puts "WARNING: log record type $rec can be\
+ seen only on systems without FTRUNCATE."
+ }
+ puts "WARNING: log record type $rec: not tested"
+ }
+ }
+ error_check_good exist_curs_close [$ec close] 0
+
+ error_check_good seen_curs \
+ [is_valid_cursor [set sc [$seendb cursor]] $seendb] TRUE
+ while { [llength [set dbt [$sc get -next]]] != 0 } {
+ set rec [lindex [lindex $dbt 0] 0]
+ if { [$existdb count $rec] == 0 } {
+ if { [$deprecdb count $rec] == 0 } {
+ puts "WARNING: log record type $rec: unknown"
+ } else {
+ puts \
+ "WARNING: log record type $rec: deprecated"
+ }
+ }
+ }
+ error_check_good seen_curs_close [$sc close] 0
+
+ error_check_good seendb_close [$seendb close] 0
+ error_check_good existdb_close [$existdb close] 0
+ error_check_good deprecdb_close [$deprecdb close] 0
+
+ logtrack_clean
+}
diff --git a/db-4.8.30/test/mdbscript.tcl b/db-4.8.30/test/mdbscript.tcl
new file mode 100644
index 0000000..1282196
--- /dev/null
+++ b/db-4.8.30/test/mdbscript.tcl
@@ -0,0 +1,402 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Process script for the multi-process db tester.
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+global dbenv
+global klock
+global l_keys
+global procid
+global alphabet
+
+# In Tcl, when there are multiple catch handlers, *all* handlers
+# are called, so we have to resort to this hack.
+#
+global exception_handled
+
+set exception_handled 0
+
+set datastr $alphabet$alphabet
+
+# Usage: mdbscript method dir file nentries iter procid procs args
+# method: access method to use
+# dir: DBHOME directory
+# file: db file on which to operate
+# nentries: number of entries taken from dictionary
+# iter: number of operations to run
+# procid: this process's id number
+# procs: total number of processes running
+# args: additional access-method arguments (optional)
+set usage "mdbscript method dir file nentries iter procid procs args"
+
+# Verify usage
+if { $argc < 7 } {
+ puts "FAIL:[timestamp] test042: Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set method [lindex $argv 0]
+set dir [lindex $argv 1]
+set file [lindex $argv 2]
+set nentries [ lindex $argv 3 ]
+set iter [ lindex $argv 4 ]
+set procid [ lindex $argv 5 ]
+set procs [ lindex $argv 6 ]
+set args [ lindex $argv 7 ]
+
+set pflags ""
+set gflags ""
+set txn ""
+
+set renum [is_rrecno $method]
+set omethod [convert_method $method]
+
+if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+}
+
+# Initialize seed
+global rand_init
+
+# We want repeatable results, but we also want each instance of mdbscript
+# to do something different. So we add the procid to the fixed seed.
+# (Note that this is a serial number given by the caller, not a pid.)
+berkdb srand [expr $rand_init + $procid]
+
+puts "Beginning execution for [pid] $method"
+puts "$dir db_home"
+puts "$file database"
+puts "$nentries data elements"
+puts "$iter iterations"
+puts "$procid process id"
+puts "$procs processes"
+eval set args $args
+puts "args: $args"
+
+set klock NOLOCK
+
+# Note: all I/O operations, and especially flush, are expensive
+# on Win2000 at least with Tcl version 8.3.2. So we'll avoid
+# flushes in the main part of the loop below.
+flush stdout
+
+set dbenv [berkdb_env -create -cdb -home $dir]
+#set dbenv [berkdb_env -create -cdb -log -home $dir]
+error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+set locker [ $dbenv lock_id ]
+
+set db [eval {berkdb_open} -env $dbenv $omethod $args {$file}]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+# Init globals (no data)
+set nkeys [db_init $db 0]
+puts "Initial number of keys: $nkeys"
+tclsleep 5
+
+proc get_lock { k } {
+ global dbenv
+ global procid
+ global locker
+ global klock
+ global DB_LOCK_WRITE
+ global DB_LOCK_NOWAIT
+ global errorInfo
+ global exception_handled
+ # Make sure that the key isn't in the middle of
+ # a delete operation
+ if {[catch {$dbenv lock_get -nowait write $locker $k} klock] != 0 } {
+ set exception_handled 1
+
+ error_check_good \
+ get_lock [is_substr $errorInfo "DB_LOCK_NOTGRANTED"] 1
+ puts "Warning: key $k locked"
+ set klock NOLOCK
+ return 1
+ } else {
+ error_check_good get_lock [is_valid_lock $klock $dbenv] TRUE
+ }
+ return 0
+}
+
+# If we are renumbering, then each time we delete an item, the number of
+# items in the file is temporarily decreased, so the highest record numbers
+# do not exist. To avoid referencing those missing records, we never
+# generate the highest few record numbers as keys.
+#
+# For record-based methods, record numbers begin at 1, while for other keys,
+# we begin at 0 to index into an array.
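+#
+# For example (hypothetical numbers): with nkeys = 1000 and procs = 4, a
+# renumbering recno method draws record numbers from 1..996, plain recno
+# from 1..1000, and btree/hash an index 0..999 into l_keys.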
+proc rand_key { method nkeys renum procs} {
+ if { $renum == 1 } {
+ return [berkdb random_int 1 [expr $nkeys - $procs]]
+ } elseif { [is_record_based $method] == 1 } {
+ return [berkdb random_int 1 $nkeys]
+ } else {
+ return [berkdb random_int 0 [expr $nkeys - 1]]
+ }
+}
+
+# On each iteration we're going to randomly pick a key.
+# 1. We'll either get it (verifying that its contents are reasonable).
+# 2. Put it (using an overwrite to make the data be datastr:ID).
+# 3. Get it and do a put through the cursor, tacking our ID on to
+# what's already there.
+# 4. Get it, read forward some random number of keys.
+# 5. Get it, read forward some random number of keys and do a put (replace).
+# 6. Get it, read forward some random number of keys and do a del. And then
+# do a put of the key.
+set gets 0
+set getput 0
+set overwrite 0
+set seqread 0
+set seqput 0
+set seqdel 0
+set dlen [string length $datastr]
+
+for { set i 0 } { $i < $iter } { incr i } {
+ set op [berkdb random_int 0 5]
+ puts "iteration $i operation $op"
+ set close_cursor 0
+ if {[catch {
+ switch $op {
+ 0 {
+ incr gets
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ continue;
+ }
+
+ set rec [eval {$db get} $txn $gflags {$key}]
+ error_check_bad "$db get $key" [llength $rec] 0
+ set partial [string range \
+ [lindex [lindex $rec 0] 1] 0 [expr $dlen - 1]]
+ error_check_good \
+ "$db get $key" $partial [pad_data $method $datastr]
+ }
+ 1 {
+ incr overwrite
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ set data $datastr:$procid
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good "$db put $key" $ret 0
+ }
+ 2 {
+ incr getput
+ set dbc [$db cursor -update]
+ error_check_good "$db cursor" \
+ [is_valid_cursor $dbc $db] TRUE
+ set close_cursor 1
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue;
+ }
+
+ set ret [$dbc get -set $key]
+ error_check_good \
+ "$dbc get $key" [llength [lindex $ret 0]] 2
+ set rec [lindex [lindex $ret 0] 1]
+ set partial [string range $rec 0 [expr $dlen - 1]]
+ error_check_good \
+ "$dbc get $key" $partial [pad_data $method $datastr]
+ append rec ":$procid"
+ set ret [$dbc put \
+ -current [chop_data $method $rec]]
+ error_check_good "$dbc put $key" $ret 0
+ error_check_good "$dbc close" [$dbc close] 0
+ set close_cursor 0
+ }
+ 3 -
+ 4 -
+ 5 {
+ if { $op == 3 } {
+ set flags ""
+ } else {
+ set flags -update
+ }
+ set dbc [eval {$db cursor} $flags]
+ error_check_good "$db cursor" \
+ [is_valid_cursor $dbc $db] TRUE
+ set close_cursor 1
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue;
+ }
+
+ set ret [$dbc get -set $key]
+ error_check_good \
+ "$dbc get $key" [llength [lindex $ret 0]] 2
+
+ # Now read a few keys sequentially
+ set nloop [berkdb random_int 0 10]
+ if { [berkdb random_int 0 1] == 0 } {
+ set flags -next
+ } else {
+ set flags -prev
+ }
+ while { $nloop > 0 } {
+ set lastret $ret
+ set ret [eval {$dbc get} $flags]
+ # Might read beginning/end of file
+ if { [llength $ret] == 0} {
+ set ret $lastret
+ break
+ }
+ incr nloop -1
+ }
+ switch $op {
+ 3 {
+ incr seqread
+ }
+ 4 {
+ incr seqput
+ set rec [lindex [lindex $ret 0] 1]
+ set partial [string range $rec 0 \
+ [expr $dlen - 1]]
+ error_check_good "$dbc get $key" \
+ $partial [pad_data $method $datastr]
+ append rec ":$procid"
+ set ret [$dbc put -current \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $key" $ret 0
+ }
+ 5 {
+ incr seqdel
+ set k [lindex [lindex $ret 0] 0]
+ # We need to lock the item we're
+ # deleting so that someone else can't
+ # try to do a get while we're
+ # deleting
+ error_check_good "$klock put" \
+ [$klock put] 0
+ set klock NOLOCK
+ set cur [$dbc get -current]
+ error_check_bad get_current \
+ [llength $cur] 0
+ set key [lindex [lindex $cur 0] 0]
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue
+ }
+ set ret [$dbc del]
+ error_check_good "$dbc del" $ret 0
+ set rec $datastr
+ append rec ":$procid"
+ if { $renum == 1 } {
+ set ret [$dbc put -before \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $k" $ret $k
+ } elseif { \
+ [is_record_based $method] == 1 } {
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ set ret [$db put $k \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$db put $k" $ret 0
+ } else {
+ set ret [$dbc put -keylast $k \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $k" $ret 0
+ }
+ }
+ }
+ if { $close_cursor == 1 } {
+ error_check_good \
+ "$dbc close" [$dbc close] 0
+ set close_cursor 0
+ }
+ }
+ }
+ } res] != 0} {
+ global errorInfo;
+ global exception_handled;
+
+# puts $errorInfo
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+
+ if { [string compare $klock NOLOCK] != 0 } {
+ catch {$klock put}
+ }
+ if {$close_cursor == 1} {
+ catch {$dbc close}
+ set close_cursor 0
+ }
+
+ if {[string first FAIL $theError] == 0 && \
+ $exception_handled != 1} {
+ flush stdout
+ error "FAIL:[timestamp] test042: key $k: $theError"
+ }
+ set exception_handled 0
+ } else {
+ if { [string compare $klock NOLOCK] != 0 } {
+ error_check_good "$klock put" [$klock put] 0
+ set klock NOLOCK
+ }
+ }
+}
+
+error_check_good db_close_catch [catch {$db close} ret] 0
+error_check_good db_close $ret 0
+error_check_good dbenv_close [$dbenv close] 0
+
+puts "[timestamp] [pid] Complete"
+puts "Successful ops: "
+puts "\t$gets gets"
+puts "\t$overwrite overwrites"
+puts "\t$getput getputs"
+puts "\t$seqread seqread"
+puts "\t$seqput seqput"
+puts "\t$seqdel seqdel"
+flush stdout
+exit
diff --git a/db-4.8.30/test/memp001.tcl b/db-4.8.30/test/memp001.tcl
new file mode 100644
index 0000000..1afe1be
--- /dev/null
+++ b/db-4.8.30/test/memp001.tcl
@@ -0,0 +1,202 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+
+# TEST memp001
+# TEST Randomly updates pages.
+proc memp001 { } {
+ source ./include.tcl
+ memp001_body 1 ""
+ memp001_body 3 ""
+ memp001_body 1 -private
+ memp001_body 3 -private
+ if { $is_qnx_test } {
+ puts "Skipping remainder of memp001 for\
+ environments in system memory on QNX"
+ return
+ }
+ memp001_body 1 "-system_mem -shm_key 1"
+ memp001_body 3 "-system_mem -shm_key 1"
+}
+
+proc memp001_body { ncache flags } {
+ source ./include.tcl
+ global rand_init
+
+ set nfiles 5
+ set iter 500
+ set psize 512
+ set cachearg "-cachesize {0 400000 $ncache}"
+
+ puts \
+"Memp001: { $flags } random update $iter iterations on $nfiles files."
+ #
+ # Check if this platform supports this set of flags
+ #
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ env_cleanup $testdir
+ puts "\tMemp001.a: Create env with $ncache caches"
+ set env [eval {berkdb_env -create -mode 0644} \
+ $cachearg {-home $testdir} $flags]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ #
+ # Do a simple mpool_stat call to verify the number of caches
+ # just to exercise the stat code.
+ set stat [$env mpool_stat]
+ set str "Number of caches"
+ set checked 0
+ foreach statpair $stat {
+ if { $checked == 1 } {
+ break
+ }
+ if { [is_substr [lindex $statpair 0] $str] != 0} {
+ set checked 1
+ error_check_good ncache [lindex $statpair 1] $ncache
+ }
+ }
+ error_check_good checked $checked 1
+
+ # Open N memp files
+ puts "\tMemp001.b: Create $nfiles mpool files"
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ set fname "data_file.$i"
+ file_create $testdir/$fname 50 $psize
+
+ set mpools($i) \
+ [$env mpool -create -pagesize $psize -mode 0644 $fname]
+ error_check_good mp_open [is_substr $mpools($i) $env.mp] 1
+ }
+
+ # Now, loop, picking files at random
+ berkdb srand $rand_init
+ puts "\tMemp001.c: Random page replacement loop"
+ for {set i 0} {$i < $iter} {incr i} {
+ set mpool $mpools([berkdb random_int 1 $nfiles])
+ set p(1) [get_range $mpool 10]
+ set p(2) [get_range $mpool 10]
+ set p(3) [get_range $mpool 10]
+ set p(1) [replace $mpool $p(1)]
+ set p(3) [replace $mpool $p(3)]
+ set p(4) [get_range $mpool 20]
+ set p(4) [replace $mpool $p(4)]
+ set p(5) [get_range $mpool 10]
+ set p(6) [get_range $mpool 20]
+ set p(7) [get_range $mpool 10]
+ set p(8) [get_range $mpool 20]
+ set p(5) [replace $mpool $p(5)]
+ set p(6) [replace $mpool $p(6)]
+ set p(9) [get_range $mpool 40]
+ set p(9) [replace $mpool $p(9)]
+ set p(10) [get_range $mpool 40]
+ set p(7) [replace $mpool $p(7)]
+ set p(8) [replace $mpool $p(8)]
+ set p(9) [replace $mpool $p(9) ]
+ set p(10) [replace $mpool $p(10)]
+ #
+ # We now need to put all the pages we have here or
+ # else they end up pinned.
+ #
+ for {set x 1} { $x <= 10} {incr x} {
+ error_check_good pgput [$p($x) put] 0
+ }
+ }
+
+ # Close N memp files, close the environment.
+ puts "\tMemp001.d: Close mpools"
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ error_check_good memp_close:$mpools($i) [$mpools($i) close] 0
+ }
+ error_check_good envclose [$env close] 0
+
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ fileremove -f $testdir/data_file.$i
+ }
+}
+
+proc file_create { fname nblocks blocksize } {
+ set fid [open $fname w]
+ for {set i 0} {$i < $nblocks} {incr i} {
+ seek $fid [expr $i * $blocksize] start
+ puts -nonewline $fid $i
+ }
+ seek $fid [expr $nblocks * $blocksize - 1]
+
+ # We don't end the file with a newline, because some platforms (like
+ # Windows) emit CR/NL. There does not appear to be a BINARY open flag
+ # that prevents this.
+ puts -nonewline $fid "Z"
+ close $fid
+
+ # Make sure it worked
+ if { [file size $fname] != $nblocks * $blocksize } {
+ error "FAIL: file_create could not create correct file size"
+ }
+}
+
+proc get_range { mpool max } {
+ set pno [berkdb random_int 0 $max]
+ set p [eval $mpool get $pno]
+ error_check_good page [is_valid_page $p $mpool] TRUE
+ set got [$p pgnum]
+ if { $got != $pno } {
+ puts "Get_range: Page mismatch page |$pno| val |$got|"
+ }
+ set ret [$p init "Page is pinned by [pid]"]
+ error_check_good page_init $ret 0
+
+ return $p
+}
+
+proc replace { mpool p { args "" } } {
+ set pgno [$p pgnum]
+
+ set ret [$p init "Page is unpinned by [pid]"]
+ error_check_good page_init $ret 0
+
+ set ret [$p put]
+ error_check_good page_put $ret 0
+
+ set p2 [eval $mpool get $args $pgno]
+ error_check_good page [is_valid_page $p2 $mpool] TRUE
+
+ return $p2
+}
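+
+# Note: a page handle returned by get_range stays pinned until it is put;
+# replace puts the old handle and returns a freshly pinned handle on the
+# same page.  That is why the loop in memp001_body must put every handle
+# it still holds at the end of each iteration.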
+
+proc mem_chk { flags } {
+ source ./include.tcl
+ global errorCode
+
+ # Open the memp with region init specified
+ env_cleanup $testdir
+
+ set cachearg " -cachesize {0 400000 3}"
+ set ret [catch {eval {berkdb_env_noerr -create -mode 0644}\
+ $cachearg {-region_init -home $testdir} $flags} env]
+ if { $ret != 0 } {
+ # If the env open failed, it may be because we're on a platform
+ # such as HP-UX 10 that won't support mutexes in shmget memory.
+ # Or QNX, which doesn't support system memory at all.
+ # Verify that the return value was EINVAL or EOPNOTSUPP
+ # and bail gracefully.
+ error_check_good is_shm_test [is_substr $flags -system_mem] 1
+ error_check_good returned_error [expr \
+ [is_substr $errorCode EINVAL] || \
+ [is_substr $errorCode EOPNOTSUPP]] 1
+ puts "Warning:\
+ platform does not support mutexes in shmget memory."
+ puts "Skipping shared memory mpool test."
+ return 1
+ }
+ error_check_good env_open [is_valid_env $env] TRUE
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+
+ return 0
+}
diff --git a/db-4.8.30/test/memp002.tcl b/db-4.8.30/test/memp002.tcl
new file mode 100644
index 0000000..015b99a
--- /dev/null
+++ b/db-4.8.30/test/memp002.tcl
@@ -0,0 +1,67 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+
+# TEST memp002
+# TEST Tests multiple processes accessing and modifying the same files.
+proc memp002 { } {
+ source ./include.tcl
+ #
+ # Multiple processes not supported by private memory so don't
+ # run memp002_body with -private.
+ #
+ memp002_body ""
+ if { $is_qnx_test } {
+ puts "Skipping remainder of memp002 for\
+ environments in system memory on QNX"
+ return
+ }
+ memp002_body "-system_mem -shm_key 1"
+}
+
+proc memp002_body { flags } {
+ source ./include.tcl
+
+ puts "Memp002: {$flags} Multiprocess mpool tester"
+
+ set procs 4
+ set psizes "512 1024 2048 4096 8192"
+ set iterations 500
+ set npages 100
+
+ # Check if this combination of flags is supported by this arch.
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ set iter [expr $iterations / $procs]
+
+ # Clean up old stuff and create new.
+ env_cleanup $testdir
+
+ for { set i 0 } { $i < [llength $psizes] } { incr i } {
+ fileremove -f $testdir/file$i
+ }
+ set e [eval {berkdb_env -create -lock -home $testdir} $flags]
+ error_check_good dbenv [is_valid_env $e] TRUE
+
+ set pidlist {}
+ for { set i 0 } { $i < $procs } {incr i} {
+
+ puts "$tclsh_path\
+ $test_path/mpoolscript.tcl $testdir $i $procs \
+ $iter $psizes $npages 3 $flags > \
+ $testdir/memp002.$i.out &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mpoolscript.tcl $testdir/memp002.$i.out $testdir $i $procs \
+ $iter $psizes $npages 3 $flags &]
+ lappend pidlist $p
+ }
+ puts "Memp002: $procs independent processes now running"
+ watch_procs $pidlist
+
+ reset_env $e
+}
diff --git a/db-4.8.30/test/memp003.tcl b/db-4.8.30/test/memp003.tcl
new file mode 100644
index 0000000..ee7633c
--- /dev/null
+++ b/db-4.8.30/test/memp003.tcl
@@ -0,0 +1,159 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST memp003
+# TEST Test reader-only/writer process combinations; we use the access methods
+# TEST for testing.
+proc memp003 { } {
+ source ./include.tcl
+ global rand_init
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+ #
+ # Multiple processes not supported by private memory so don't
+ # run memp003_body with -private.
+ #
+ memp003_body ""
+ if { $is_qnx_test } {
+ puts "Skipping remainder of memp003 for\
+ environments in system memory on QNX"
+ return
+ }
+ memp003_body "-system_mem -shm_key 1"
+}
+
+proc memp003_body { flags } {
+ global alphabet
+ source ./include.tcl
+
+ puts "Memp003: {$flags} Reader/Writer tests"
+
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ env_cleanup $testdir
+ set psize 1024
+ set nentries 500
+ set testfile mpool.db
+ set t1 $testdir/t1
+
+ # Create an environment that the two processes can share, with
+ # 20 pages per cache.
+ set c [list 0 [expr $psize * 20 * 3] 3]
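+ # (With the 1024-byte pagesize set above, $c is {0 61440 3}: three
+ # caches sharing 61440 bytes, about 20 pages per cache.)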
+ set dbenv [eval {berkdb_env \
+ -create -lock -home $testdir -cachesize $c} $flags]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # First open and create the file.
+ set db [berkdb_open -env $dbenv -create \
+ -mode 0644 -pagesize $psize -btree $testfile]
+ error_check_good dbopen/RW [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set txn ""
+ set count 0
+
+ puts "\tMemp003.a: create database"
+ set keys ""
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys $str
+
+ set ret [eval {$db put} $txn {$str $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn {$str}]
+ error_check_good get $ret [list [list $str $str]]
+
+ incr count
+ }
+ close $did
+ error_check_good close [$db close] 0
+
+ # Now open the file for read-only
+ set db [berkdb_open -env $dbenv -rdonly $testfile]
+ error_check_good dbopen/RO [is_substr $db db] 1
+
+ puts "\tMemp003.b: verify a few keys"
+ # Read and verify a couple of keys; saving them to check later
+ set testset ""
+ for { set i 0 } { $i < 10 } { incr i } {
+ set ndx [berkdb random_int 0 [expr $nentries - 1]]
+ set key [lindex $keys $ndx]
+ if { [lsearch $testset $key] != -1 } {
+ incr i -1
+ continue;
+ }
+
+ # The remote process stuff is unhappy with
+ # zero-length keys; make sure we don't pick one.
+ if { [llength $key] == 0 } {
+ incr i -1
+ continue
+ }
+
+ lappend testset $key
+
+ set ret [eval {$db get} $txn {$key}]
+ error_check_good get/RO $ret [list [list $key $key]]
+ }
+
+ puts "\tMemp003.c: retrieve and modify keys in remote process"
+ # Now open remote process where we will open the file RW
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+ puts $f1 "flush stdout"
+ flush $f1
+
+ set c [concat "{" [list 0 [expr $psize * 20 * 3] 3] "}" ]
+ set remote_env [send_cmd $f1 \
+ "berkdb_env -create -lock -home $testdir -cachesize $c $flags"]
+ error_check_good remote_dbenv [is_valid_env $remote_env] TRUE
+
+ set remote_db [send_cmd $f1 "berkdb_open -env $remote_env $testfile"]
+ error_check_good remote_dbopen [is_valid_db $remote_db] TRUE
+
+ foreach k $testset {
+ # Get the key
+ set ret [send_cmd $f1 "$remote_db get $k"]
+ error_check_good remote_get $ret [list [list $k $k]]
+
+ # Now replace the key
+ set ret [send_cmd $f1 "$remote_db put $k $k$k"]
+ error_check_good remote_put $ret 0
+ }
+
+ puts "\tMemp003.d: verify changes in local process"
+ foreach k $testset {
+ set ret [eval {$db get} $txn {$k}]
+ error_check_good get_verify/RO $ret [list [list $k $k$k]]
+ }
+
+ puts "\tMemp003.e: Fill up the cache with dirty buffers"
+ foreach k $testset {
+ # Now rewrite the keys with BIG data
+ set data [replicate $alphabet 32]
+ set ret [send_cmd $f1 "$remote_db put $k $data"]
+ error_check_good remote_put $ret 0
+ }
+
+ puts "\tMemp003.f: Get more pages for the read-only file"
+ dump_file $db $txn $t1 nop
+
+ puts "\tMemp003.g: Sync from the read-only file"
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_close [$db close] 0
+
+ set ret [send_cmd $f1 "$remote_db close"]
+ error_check_good remote_get $ret 0
+
+ # Close the environment both remotely and locally.
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:env_close $ret 0
+ close $f1
+
+ reset_env $dbenv
+}
diff --git a/db-4.8.30/test/memp004.tcl b/db-4.8.30/test/memp004.tcl
new file mode 100644
index 0000000..d8e16cb
--- /dev/null
+++ b/db-4.8.30/test/memp004.tcl
@@ -0,0 +1,82 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+
+# TEST memp004
+# TEST Test that small read-only databases are mapped into memory.
+proc memp004 { } {
+ global is_qnx_test
+ source ./include.tcl
+
+ puts "Memp004: Test of memory-mapped read-only databases"
+
+ if { $is_qnx_test } {
+ puts "Memp004: skipping for QNX"
+ return
+ }
+
+ env_cleanup $testdir
+ set testfile memp004.db
+
+ # Create an environment.
+ puts "\tMemp004.a: Create an environment and database"
+ set dbenv [eval {berkdb_env -create -home $testdir -private}]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+ set db [berkdb_open -env $dbenv -create -mode 0644 -btree $testfile]
+ error_check_good dbopen/$testfile/RW [is_valid_db $db] TRUE
+
+ # Put each key/data pair.
+ set did [open $dict]
+ set keys ""
+ set count 0
+ while { [gets $did str] != -1 && $count < 1000 } {
+ lappend keys $str
+
+ set ret [eval {$db put} {$str $str}]
+ error_check_good put $ret 0
+
+ incr count
+ }
+ close $did
+ error_check_good close [$db close] 0
+
+ # Discard the environment.
+ error_check_good close [$dbenv close] 0
+
+ puts "\tMemp004.b: Re-create the environment and open database read-only"
+ set dbenv [eval {berkdb_env -create -home $testdir}]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+ set db [berkdb_open -env $dbenv -rdonly $testfile]
+ error_check_good dbopen/$testfile/RO [is_substr $db db] 1
+
+ # Read a couple of keys.
+ set c [eval {$db cursor}]
+ for { set i 0 } { $i < 500 } { incr i } {
+ set ret [$c get -next]
+ }
+
+ puts "\tMemp004.c: Check mpool statistics"
+ set tmp [memp004_stat $dbenv "Pages mapped into address space"]
+ error_check_good "mmap check: $tmp >= 500" [expr $tmp >= 500] 1
+
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+}
+
+# memp004_stat --
+# Return the value of the mpool statistic whose name contains $s.
+proc memp004_stat { env s } {
+ set stat [$env mpool_stat]
+ foreach statpair $stat {
+ set statmsg [lindex $statpair 0]
+ set statval [lindex $statpair 1]
+ if {[is_substr $statmsg $s] != 0} {
+ return $statval
+ }
+ }
+ puts "FAIL: memp004: stat string $s not found"
+ return 0
+}
diff --git a/db-4.8.30/test/mpoolscript.tcl b/db-4.8.30/test/mpoolscript.tcl
new file mode 100644
index 0000000..8c8a7e4
--- /dev/null
+++ b/db-4.8.30/test/mpoolscript.tcl
@@ -0,0 +1,170 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Random multiple process mpool tester.
+# Usage: mpoolscript dir id maxprocs numiters pgsizes numpages sleepint flags
+# dir: lock directory.
+# id: Unique identifier for this process.
+# maxprocs: Number of procs in this test.
+# numiters: Total number of iterations.
+# pgsizes: Pagesizes for the different files. Length of this item indicates
+# how many files to use.
+# numpages: Number of pages per file.
+# sleepint: Maximum sleep interval.
+# flags: Flags for env open
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage \
+ "mpoolscript dir id maxprocs numiters pgsizes numpages sleepint flags"
+
+# Verify usage
+if { $argc != 8 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ puts $argc
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set id [lindex $argv 1]
+set maxprocs [lindex $argv 2]
+set numiters [ lindex $argv 3 ]
+set pgsizes [ lindex $argv 4 ]
+set numpages [ lindex $argv 5 ]
+set sleepint [ lindex $argv 6 ]
+set flags [ lindex $argv 7]
+
+# Initialize seed
+global rand_init
+berkdb srand $rand_init
+
+# Give time for all processes to start up.
+tclsleep 10
+
+puts -nonewline "Beginning execution for $id: $maxprocs $dir $numiters"
+puts " $pgsizes $numpages $sleepint"
+flush stdout
+
+# Figure out how small/large to make the cache
+set max 0
+foreach i $pgsizes {
+ if { $i > $max } {
+ set max $i
+ }
+}
+
+set cache [list 0 [expr $maxprocs * ([lindex $pgsizes 0] + $max)] 1]
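+# For example, with memp002's settings (pgsizes "512 1024 2048 4096 8192",
+# 4 processes) this works out to {0 34816 1}: a single cache of
+# 4 * (512 + 8192) bytes.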
+set env_cmd {berkdb_env -lock -cachesize $cache -home $dir}
+set e [eval $env_cmd $flags]
+error_check_good env_open [is_valid_env $e] TRUE
+
+# Now open files
+set mpools {}
+set nfiles 0
+foreach psize $pgsizes {
+ set mp [$e mpool -create -mode 0644 -pagesize $psize file$nfiles]
+ error_check_good memp_fopen:$nfiles [is_valid_mpool $mp $e] TRUE
+ lappend mpools $mp
+ incr nfiles
+}
+
+puts "Establishing long-term pin on file 0 page $id for process $id"
+
+# Set up the long-pin page
+set locker [$e lock_id]
+set lock [$e lock_get write $locker 0:$id]
+error_check_good lock_get [is_valid_lock $lock $e] TRUE
+
+set mp [lindex $mpools 0]
+set master_page [$mp get -create -dirty $id]
+error_check_good mp_get:$master_page [is_valid_page $master_page $mp] TRUE
+
+set r [$master_page init MASTER$id]
+error_check_good page_init $r 0
+
+# Release the lock but keep the page pinned
+set r [$lock put]
+error_check_good lock_put $r 0
+
+# Main loop. On each iteration, we'll check every page in each of
+# the files. On any file, if we see the appropriate tag in the
+# field, we'll rewrite the page, else we won't. Keep track of
+# how many pages we actually process.
+set pages 0
+for { set iter 0 } { $iter < $numiters } { incr iter } {
+ puts "[timestamp]: iteration $iter, $pages pages set so far"
+ flush stdout
+ for { set fnum 1 } { $fnum < $nfiles } { incr fnum } {
+ if { [expr $fnum % 2 ] == 0 } {
+ set pred [expr ($id + $maxprocs - 1) % $maxprocs]
+ } else {
+ set pred [expr ($id + $maxprocs + 1) % $maxprocs]
+ }
+
+ set mpf [lindex $mpools $fnum]
+ for { set p 0 } { $p < $numpages } { incr p } {
+ set lock [$e lock_get write $locker $fnum:$p]
+ error_check_good lock_get:$fnum:$p \
+ [is_valid_lock $lock $e] TRUE
+
+ # Now, get the page
+ set pp [$mpf get -create -dirty $p]
+ error_check_good page_get:$fnum:$p \
+ [is_valid_page $pp $mpf] TRUE
+
+ if { [$pp is_setto $pred] == 0 || [$pp is_setto 0] == 0 } {
+ # Set page to self.
+ set r [$pp init $id]
+ error_check_good page_init:$fnum:$p $r 0
+ incr pages
+ set r [$pp put]
+ error_check_good page_put:$fnum:$p $r 0
+ } else {
+ error_check_good page_put:$fnum:$p [$pp put] 0
+ }
+ error_check_good lock_put:$fnum:$p [$lock put] 0
+ }
+ }
+ tclsleep [berkdb random_int 1 $sleepint]
+}
+
+# Now verify your master page, release its pin, then verify everyone else's
+puts "$id: End of run verification of master page"
+set r [$master_page is_setto MASTER$id]
+error_check_good page_check $r 1
+set r [$master_page put]
+error_check_good page_put $r 0
+
+set i [expr ($id + 1) % $maxprocs]
+set mpf [lindex $mpools 0]
+
+while { $i != $id } {
+ set p [$mpf get -create $i]
+ error_check_good mp_get [is_valid_page $p $mpf] TRUE
+
+ if { [$p is_setto MASTER$i] != 1 } {
+ puts "Warning: Master page $i not set."
+ }
+ error_check_good page_put:$p [$p put] 0
+
+ set i [expr ($i + 1) % $maxprocs]
+}
+
+# Close files
+foreach i $mpools {
+ set r [$i close]
+ error_check_good mpf_close $r 0
+}
+
+# Close environment system
+set r [$e close]
+error_check_good env_close $r 0
+
+puts "[timestamp] $id Complete"
+flush stdout
diff --git a/db-4.8.30/test/mut001.tcl b/db-4.8.30/test/mut001.tcl
new file mode 100644
index 0000000..316aee5
--- /dev/null
+++ b/db-4.8.30/test/mut001.tcl
@@ -0,0 +1,110 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+#
+# TEST mut001
+# TEST Exercise the mutex API.
+#
+# TEST Allocate, lock, unlock, and free a bunch of mutexes.
+# TEST Set basic configuration options and check mutex_stat and
+# TEST the mutex getters for the correct values.
+
+proc mut001 { } {
+ source ./include.tcl
+ env_cleanup $testdir
+
+ puts "Mut001: Basic mutex interface testing."
+
+ # Open an env.
+ set env [berkdb_env -create -home $testdir]
+
+ # Allocate, lock, unlock, and free a bunch of mutexes.
+ set nmutexes 100
+ puts "\tMut001.a: Allocate a bunch of mutexes."
+ for { set i 0 } { $i < $nmutexes } { incr i } {
+ set mutexid($i) [$env mutex]
+ }
+ puts "\tMut001.b: Lock the mutexes."
+ for { set i 0 } { $i < $nmutexes } { incr i } {
+ error_check_good mutex_lock [$env mutex_lock $mutexid($i)] 0
+ }
+ puts "\tMut001.c: Unlock the mutexes."
+ for { set i 0 } { $i < $nmutexes } { incr i } {
+ error_check_good mutex_unlock [$env mutex_unlock $mutexid($i)] 0
+ }
+ puts "\tMut001.d: Free the mutexes."
+ for { set i 0 } { $i < $nmutexes } { incr i } {
+ error_check_good mutex_free [$env mutex_free $mutexid($i)] 0
+ }
+
+ # Clean up the env. We'll need new envs to test the configuration
+ # options, because they cannot be set after the env is open.
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+
+ puts "\tMut001.e: Set the mutex alignment."
+ set mutex_align 8
+ set env [berkdb_env -create -home $testdir -mutex_set_align $mutex_align]
+
+ set stat_align [stat_field $env mutex_stat "Mutex align"]
+ set get_align [$env mutex_get_align]
+ error_check_good stat_align $stat_align $mutex_align
+ error_check_good get_align $get_align $mutex_align
+
+ # Find the number of mutexes allocated by default. We'll need
+ # this later, when we try the "mutex_set_increment" option.
+ set default_count [stat_field $env mutex_stat "Mutex count"]
+
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+
+ puts "\tMut001.f: Set the maximum number of mutexes."
+ set mutex_count 2000
+ set env [berkdb_env -create -home $testdir -mutex_set_max $mutex_count]
+
+ set stat_count [stat_field $env mutex_stat "Mutex count"]
+ set get_count [$env mutex_get_max]
+ error_check_good stat_count $stat_count $mutex_count
+ error_check_good get_count $get_count $mutex_count
+
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+
+ puts "\tMut001.g: Raise the maximum number of mutexes."
+ set mutex_incr 500
+ set mutex_count [expr $default_count + $mutex_incr]
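+ # -mutex_set_incr adds mutexes on top of the default allocation, so
+ # the expected total is the default count found above plus 500
+ # (e.g. a hypothetical default of 1500 would give 2000).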
+
+ set env [berkdb_env -create -home $testdir -mutex_set_incr $mutex_incr]
+
+ set stat_count [stat_field $env mutex_stat "Mutex count"]
+ error_check_good stat_increment $stat_count $mutex_count
+ set get_count [$env mutex_get_max]
+ error_check_good get_increment $get_count $mutex_count
+
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+
+ puts "\tMut001.h: Set and reset the number of TAS mutex spins."
+ set mutex_tas_spins 50
+
+ set env [berkdb_env -create -home $testdir -mutex_set_tas_spins $mutex_tas_spins]
+ set stat_spins [stat_field $env mutex_stat "Mutex TAS spins"]
+ error_check_good stat_spins $stat_spins $mutex_tas_spins
+ set get_spins [$env mutex_get_tas_spins]
+ error_check_good get_spins $get_spins $mutex_tas_spins
+
+ # TAS spins can be reset any time.
+ set mutex_tas_spins 1
+ error_check_good reset_spins [$env mutex_set_tas_spins $mutex_tas_spins] 0
+ set stat_spins [stat_field $env mutex_stat "Mutex TAS spins"]
+ error_check_good stat_spins_reset $stat_spins $mutex_tas_spins
+ set get_spins [$env mutex_get_tas_spins]
+ error_check_good get_spins_reset $get_spins $mutex_tas_spins
+
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+}
+
diff --git a/db-4.8.30/test/mut002.tcl b/db-4.8.30/test/mut002.tcl
new file mode 100644
index 0000000..057e9d8
--- /dev/null
+++ b/db-4.8.30/test/mut002.tcl
@@ -0,0 +1,52 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST mut002
+# TEST Two-process mutex test.
+#
+# TEST Allocate and lock a self-blocking mutex. Start another process.
+# TEST Try to lock the mutex again -- it will block.
+# TEST Unlock the mutex from the other process, and the blocked
+# TEST lock should be obtained. Clean up.
+# TEST Do another test with a "-process-only" mutex. The second
+# TEST process should not be able to unlock the mutex.
+
+proc mut002 { } {
+ source ./include.tcl
+
+ puts "Mut002: Two process mutex test."
+
+ # Open an env.
+ set env [berkdb_env -create -home $testdir]
+
+ puts "\tMut002.a: Allocate and lock a mutex."
+ set mutex [$env mutex -self_block]
+ error_check_good obtained_lock [$env mutex_lock $mutex] 0
+
+ # Start a second process.
+ puts "\tMut002.b: Start another process."
+ set p2 [exec $tclsh_path $test_path/wrap.tcl mut002script.tcl\
+ $testdir/mut002.log $testdir $mutex &]
+
+ # Try to lock the mutex again. This will hang until the second
+ # process unlocks it.
+ $env mutex_lock $mutex
+
+ watch_procs $p2 1 20
+
+ # Clean up, and check the log file from process 2.
+ error_check_good mutex_unlock [$env mutex_unlock $mutex] 0
+ error_check_good env_close [$env close] 0
+
+ # We expect the log file to be empty. If there are any
+ # messages, report them as failures.
+ set fd [open $testdir/mut002.log r]
+ while { [gets $fd line] >= 0 } {
+ puts "FAIL: unexpected output in log file mut002: $line"
+ }
+ close $fd
+}
+
diff --git a/db-4.8.30/test/mut002script.tcl b/db-4.8.30/test/mut002script.tcl
new file mode 100644
index 0000000..7f340f5
--- /dev/null
+++ b/db-4.8.30/test/mut002script.tcl
@@ -0,0 +1,39 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Mut002script: for use with mut002, a 2-process mutex test.
+# Usage: mut002script testdir mutex
+# testdir: directory containing the env we are joining.
+# mutex: id of mutex
+
+source ./include.tcl
+
+set usage "mut002script testdir mutex"
+
+# Verify usage
+if { $argc != 2 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments.
+set testdir [ lindex $argv 0 ]
+set mutex [ lindex $argv 1 ]
+
+# Open environment.
+if {[catch {eval {berkdb_env} -create -home $testdir } dbenv]} {
+ puts "FAIL: opening env returned $dbenv"
+}
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+# Pause for a while to let the original process block.
+tclsleep 10
+
+# Unlock the mutex and let the original process proceed.
+$dbenv mutex_unlock $mutex
+
+# Clean up.
+error_check_good env_close [$dbenv close] 0
diff --git a/db-4.8.30/test/mut003.tcl b/db-4.8.30/test/mut003.tcl
new file mode 100644
index 0000000..003adc8
--- /dev/null
+++ b/db-4.8.30/test/mut003.tcl
@@ -0,0 +1,59 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST mut003
+# TEST Try doing mutex operations out of order. Make sure
+# TEST we get appropriate errors.
+
+proc mut003 { } {
+ source ./include.tcl
+ env_cleanup $testdir
+
+ puts "Mut003: Out of order mutex operations."
+
+ # Allocate a mutex. Try to unlock it before it's locked.
+ puts "\tMut003.a: Try to unlock a mutex that's not locked."
+ set env [berkdb_env_noerr -create -home $testdir]
+ set mutex [$env mutex]
+ catch { $env mutex_unlock $mutex } res
+ error_check_good \
+ already_unlocked [is_substr $res "lock already unlocked"] 1
+ env_cleanup $testdir
+
+ # Allocate and lock a mutex. Try to unlock it twice.
+ puts "\tMut003.b: Try to unlock a mutex twice."
+ set env [berkdb_env_noerr -create -home $testdir]
+ set mutex [$env mutex]
+ error_check_good mutex_lock [$env mutex_lock $mutex] 0
+ error_check_good mutex_unlock [$env mutex_unlock $mutex] 0
+ catch { $env mutex_unlock $mutex } res
+ error_check_good \
+ already_unlocked [is_substr $res "lock already unlocked"] 1
+ env_cleanup $testdir
+
+ # Allocate and free a mutex. Then try to lock it.
+ puts "\tMut003.c: Try to lock a freed mutex."
+ set env [berkdb_env_noerr -create -home $testdir]
+ set mutex [$env mutex]
+ error_check_good mutex_free [$env mutex_free $mutex] 0
+ catch { $env mutex_lock $mutex } res
+
+# error_check_good \
+# allocation_error [is_substr $res "lock already unlocked"] 1
+ env_cleanup $testdir
+
+ # Allocate and lock a mutex. Try to free it before it's unlocked.
+ puts "\tMut003.d: Try to free a still-locked mutex."
+ set env [berkdb_env_noerr -create -home $testdir]
+ set mutex [$env mutex]
+ error_check_good mutex_lock [$env mutex_lock $mutex] 0
+ catch { $env mutex_free $mutex } res
+
+# error_check_good \
+# allocation_error [is_substr $res "lock already unlocked"] 1
+ env_cleanup $testdir
+}
+
diff --git a/db-4.8.30/test/ndbm.tcl b/db-4.8.30/test/ndbm.tcl
new file mode 100644
index 0000000..abc9aff
--- /dev/null
+++ b/db-4.8.30/test/ndbm.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Historic NDBM interface test.
+# Use the first 1000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Then reopen the file, re-retrieve everything.
+# Finally, delete everything.
+proc ndbm { { nentries 1000 } } {
+ source ./include.tcl
+
+ puts "NDBM interfaces test: $nentries"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/ndbmtest
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ set db [berkdb ndbm_open -create -truncate -mode 0644 $testfile]
+ error_check_good ndbm_open [is_substr $db ndbm] 1
+ set did [open $dict]
+
+ error_check_good rdonly_false [$db rdonly] 0
+
+ set flags 0
+ set txn 0
+ set count 0
+ set skippednullkey 0
+
+ puts "\tNDBM.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # NDBM can't handle zero-length keys
+ if { [string length $str] == 0 } {
+ set skippednullkey 1
+ continue
+ }
+
+ set ret [$db store $str $str insert]
+ error_check_good ndbm_store $ret 0
+
+ set d [$db fetch $str]
+ error_check_good ndbm_fetch $d $str
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tNDBM.b: dump file"
+ set oid [open $t1 w]
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set d [$db fetch $key]
+ error_check_good ndbm_refetch $d $key
+ }
+
+ # If we had to skip a zero-length key, juggle things to cover up
+ # this fact in the dump.
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ incr nentries 1
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # File descriptors tests won't work under Windows.
+ if { $is_windows_test != 1 } {
+ puts "\tNDBM.c: pagf/dirf test"
+ set fd [$db pagfno]
+ error_check_bad pagf $fd -1
+ set fd [$db dirfno]
+ error_check_bad dirf $fd -1
+ }
+
+ puts "\tNDBM.d: close, open, and dump file"
+
+ # Now, reopen the file and run the last test again.
+ error_check_good ndbm_close [$db close] 0
+ set db [berkdb ndbm_open -rdonly $testfile]
+ error_check_good ndbm_open2 [is_substr $db ndbm] 1
+ set oid [open $t1 w]
+
+ error_check_good rdonly_true [$db rdonly] "rdonly:not owner"
+
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set d [$db fetch $key]
+ error_check_good ndbm_refetch2 $d $key
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and delete each entry
+ puts "\tNDBM.e: sequential scan and delete"
+
+ error_check_good ndbm_close [$db close] 0
+ set db [berkdb ndbm_open $testfile]
+ error_check_good ndbm_open3 [is_substr $db ndbm] 1
+ set oid [open $t1 w]
+
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set ret [$db delete $key]
+ error_check_good ndbm_delete $ret 0
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+ error_check_good ndbm_close [$db close] 0
+}
diff --git a/db-4.8.30/test/parallel.tcl b/db-4.8.30/test/parallel.tcl
new file mode 100644
index 0000000..a848a33
--- /dev/null
+++ b/db-4.8.30/test/parallel.tcl
@@ -0,0 +1,398 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# Code to load up the tests into the Queue database
+# $Id$
+proc load_queue { file {dbdir RUNQUEUE} nitems } {
+ global serial_tests
+ global num_serial
+ global num_parallel
+
+ puts -nonewline "Loading run queue with $nitems items..."
+ flush stdout
+
+ set env [berkdb_env -create -lock -home $dbdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # Open two databases, one for tests that may be run
+ # in parallel, the other for tests we want to run
+ # while only a single process is testing.
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 -len 200 -queue queue.db} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set serialdb [eval {berkdb_open -env $env -create \
+ -mode 0644 -len 200 -queue serialqueue.db} ]
+ error_check_good dbopen [is_valid_db $serialdb] TRUE
+
+ set fid [open $file]
+
+ set count 0
+
+ while { [gets $fid str] != -1 } {
+ set testarr($count) $str
+ incr count
+ }
+
+ # Randomize array of tests.
+ set rseed [pid]
+ berkdb srand $rseed
+ puts -nonewline "randomizing..."
+ flush stdout
+ for { set i 0 } { $i < $count } { incr i } {
+ set tmp $testarr($i)
+
+ # RPC test is very long so force it to run first
+ # in full runs. If we find 'r rpc' as we walk the
+ # array, arrange to put it in slot 0 ...
+ if { [is_substr $tmp "r rpc"] == 1 && \
+ [string match $nitems ALL] } {
+ set j 0
+ } else {
+ set j [berkdb random_int $i [expr $count - 1]]
+ }
+ # ... and if 'r rpc' is selected to be swapped with the
+ # current item in the array, skip the swap. If we
+ # did the swap and moved to the next item, "r rpc" would
+ # never get moved to slot 0.
+ if { [is_substr $testarr($j) "r rpc"] && \
+ [string match $nitems ALL] } {
+ continue
+ }
+
+ set testarr($i) $testarr($j)
+ set testarr($j) $tmp
+ }
+
+ if { [string compare ALL $nitems] != 0 } {
+ set maxload $nitems
+ } else {
+ set maxload $count
+ }
+
+ puts "loading..."
+ flush stdout
+ set num_serial 0
+ set num_parallel 0
+ for { set i 0 } { $i < $maxload } { incr i } {
+ set str $testarr($i)
+ # Push serial tests into serial testing db, others
+ # into parallel db.
+ if { [is_serial $str] } {
+ set ret [eval {$serialdb put -append $str}]
+ error_check_good put:serialdb [expr $ret > 0] 1
+ incr num_serial
+ } else {
+ set ret [eval {$db put -append $str}]
+ error_check_good put:paralleldb [expr $ret > 0] 1
+ incr num_parallel
+ }
+ }
+
+ error_check_good maxload $maxload [expr $num_serial + $num_parallel]
+ puts "Loaded $maxload records: $num_serial in serial,\
+ $num_parallel in parallel."
+ close $fid
+ $db close
+ $serialdb close
+ $env close
+}
+
+proc init_runqueue { {dbdir RUNQUEUE} nitems list} {
+
+ if { [file exists $dbdir] != 1 } {
+ file mkdir $dbdir
+ }
+ puts "Creating test list..."
+ $list ALL -n
+ load_queue ALL.OUT $dbdir $nitems
+ file delete TEST.LIST
+ file rename ALL.OUT TEST.LIST
+}
+
+proc run_parallel { nprocs {list run_all} {nitems ALL} } {
+ global num_serial
+ global num_parallel
+
+ # Forcibly remove stuff from prior runs, if it's still there.
+ fileremove -f ./RUNQUEUE
+ set dirs [glob -nocomplain ./PARALLEL_TESTDIR.*]
+ set files [glob -nocomplain ALL.OUT.*]
+ foreach file $files {
+ fileremove -f $file
+ }
+ foreach dir $dirs {
+ fileremove -f $dir
+ }
+
+ set basename ./PARALLEL_TESTDIR
+ set queuedir ./RUNQUEUE
+ source ./include.tcl
+
+ mkparalleldirs $nprocs $basename $queuedir
+
+ init_runqueue $queuedir $nitems $list
+
+ set basedir [pwd]
+ set queuedir ../../[string range $basedir \
+ [string last "/" $basedir] end]/$queuedir
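+ # Rewrite queuedir as a path that works from inside each
+ # PARALLEL_TESTDIR.N directory: up two levels and back down into
+ # the build directory's RUNQUEUE.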
+
+ # Run serial tests in parallel testdir 0.
+ run_queue 0 $basename.0 $queuedir serial $num_serial
+
+ set pidlist {}
+ # Run parallel tests in testdirs 1 through n.
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ set ret [catch {
+ set p [exec $tclsh_path << \
+ "source $test_path/test.tcl; run_queue $i \
+ $basename.$i $queuedir parallel $num_parallel" &]
+ lappend pidlist $p
+ set f [open $testdir/begin.$p w]
+ close $f
+ } res]
+ }
+ watch_procs $pidlist 300 1000000
+
+ set failed 0
+ for { set i 0 } { $i <= $nprocs } { incr i } {
+ if { [file exists ALL.OUT.$i] == 1 } {
+ puts -nonewline "Checking output from ALL.OUT.$i ... "
+ if { [check_output ALL.OUT.$i] == 1 } {
+ set failed 1
+ }
+ puts " done."
+ }
+ }
+ if { $failed == 0 } {
+ puts "Regression tests succeeded."
+ } else {
+ puts "Regression tests failed."
+ puts "Review UNEXPECTED OUTPUT lines above for errors."
+ puts "Complete logs found in ALL.OUT.x files"
+ }
+}
+
+proc run_queue { i rundir queuedir {qtype parallel} {nitems 0} } {
+ set builddir [pwd]
+ file delete $builddir/ALL.OUT.$i
+ cd $rundir
+
+ puts "Starting $qtype run_queue process $i (pid [pid])."
+
+ source ./include.tcl
+ global env
+
+ set dbenv [berkdb_env -create -lock -home $queuedir]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ if { $qtype == "parallel" } {
+ set db [eval {berkdb_open -env $dbenv \
+ -mode 0644 -queue queue.db} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ } elseif { $qtype == "serial" } {
+ set db [eval {berkdb_open -env $dbenv \
+ -mode 0644 -queue serialqueue.db} ]
+ error_check_good serialdbopen [is_valid_db $db] TRUE
+ } else {
+ puts "FAIL: queue type $qtype not recognized"
+ }
+
+ set dbc [eval $db cursor]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ set count 0
+ set waitcnt 0
+ set starttime [timestamp -r]
+
+ while { $waitcnt < 5 } {
+ set line [$db get -consume]
+ if { [ llength $line ] > 0 } {
+ set cmd [lindex [lindex $line 0] 1]
+ set num [lindex [lindex $line 0] 0]
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "\nExecuting record $num ([timestamp -w]):\n"
+ set tdir "TESTDIR.$i"
+ regsub -all {TESTDIR} $cmd $tdir cmd
+ puts $o $cmd
+ close $o
+ if { [expr {$num % 10} == 0] && $nitems != 0 } {
+ puts -nonewline \
+ "Starting test $num of $nitems $qtype items. "
+ set now [timestamp -r]
+ set elapsed_secs [expr $now - $starttime]
+ set secs_per_test [expr $elapsed_secs / $num]
+ set esttotal [expr $nitems * $secs_per_test]
+ set remaining [expr $esttotal - $elapsed_secs]
+ if { $remaining < 3600 } {
+ puts "\tRough guess: less than 1\
+ hour left."
+ } else {
+ puts "\tRough guess: \
+ [expr $remaining / 3600] hour(s) left."
+ }
+ }
+# puts "Process $i, record $num:\n$cmd"
+ set env(PURIFYOPTIONS) \
+ "-log-file=./test$num.%p -follow-child-processes -messages=first"
+ set env(PURECOVOPTIONS) \
+ "-counts-file=./cov.pcv -log-file=./cov.log -follow-child-processes"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; $cmd" \
+ >>& $builddir/ALL.OUT.$i } res] {
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "FAIL: '$cmd': $res"
+ close $o
+ }
+ env_cleanup $testdir
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "\nEnding record $num ([timestamp])\n"
+ close $o
+ incr count
+ } else {
+ incr waitcnt
+ tclsleep 1
+ }
+ }
+
+ set now [timestamp -r]
+ set elapsed [expr $now - $starttime]
+ puts "Process $i: $count commands executed in [format %02u:%02u \
+ [expr $elapsed / 3600] [expr ($elapsed % 3600) / 60]]"
+
+ error_check_good close_parallel_cursor_$i [$dbc close] 0
+ error_check_good close_parallel_db_$i [$db close] 0
+ error_check_good close_parallel_env_$i [$dbenv close] 0
+
+ #
+ # We need to put the pid file in the builddir's idea
+ # of testdir, not this child process' local testdir.
+ # Therefore source builddir's include.tcl to get its
+ # testdir.
+ # !!! This resets testdir, so don't do anything else
+ # local to the child after this.
+ source $builddir/include.tcl
+
+ set f [open $builddir/$testdir/end.[pid] w]
+ close $f
+ cd $builddir
+}
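+
+# Illustrative sketch (layout inferred from the code above, not part of
+# the original file): each RUNQUEUE record pairs a record number with a
+# test command, so a queue entry can be inspected from tclsh roughly
+# like this (note that -consume removes the record from the queue):
+#
+#	set dbenv [berkdb_env -create -lock -home ./RUNQUEUE]
+#	set db [berkdb_open -env $dbenv -queue queue.db]
+#	set rec [$db get -consume]
+#	puts "record [lindex [lindex $rec 0] 0]: [lindex [lindex $rec 0] 1]"
+#	$db close
+#	$dbenv close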
+
+proc mkparalleldirs { nprocs basename queuedir } {
+ source ./include.tcl
+ set dir [pwd]
+
+ if { $is_windows_test != 1 } {
+ set EXE ""
+ } else {
+ set EXE ".exe"
+ }
+ for { set i 0 } { $i <= $nprocs } { incr i } {
+ set destdir $basename.$i
+ catch {file mkdir $destdir}
+ puts "Created $destdir"
+ if { $is_windows_test == 1 } {
+ catch {file mkdir $destdir/$buildpath}
+ catch {eval file copy \
+ [eval glob {$dir/$buildpath/*.dll}] $destdir/$buildpath}
+ catch {eval file copy \
+ [eval glob {$dir/$buildpath/db_{checkpoint,deadlock}$EXE} \
+ {$dir/$buildpath/db_{dump,load,printlog,recover,stat,upgrade}$EXE} \
+ {$dir/$buildpath/db_{archive,verify,hotbackup}$EXE}] \
+ {$dir/$buildpath/dbkill$EXE} \
+ $destdir/$buildpath}
+ catch {eval file copy \
+ [eval glob -nocomplain {$dir/$buildpath/db_{reptest,repsite}$EXE}] \
+ $destdir/$buildpath}
+ }
+ catch {eval file copy \
+ [eval glob {$dir/{.libs,include.tcl}}] $destdir}
+ # catch {eval file copy $dir/$queuedir $destdir}
+ catch {eval file copy \
+ [eval glob {$dir/db_{checkpoint,deadlock}$EXE} \
+ {$dir/db_{dump,load,printlog,recover,stat,upgrade}$EXE} \
+ {$dir/db_{archive,verify,hotbackup}$EXE}] \
+ $destdir}
+ catch {eval file copy \
+ [eval glob -nocomplain {$dir/db_{reptest,repsite}$EXE}] $destdir}
+
+ # Create modified copies of include.tcl in parallel
+ # directories so paths still work.
+
+ set infile [open ./include.tcl r]
+ set d [read $infile]
+ close $infile
+
+ regsub {test_path } $d {test_path ../} d
+ regsub {src_root } $d {src_root ../} d
+ set tdir "TESTDIR.$i"
+ regsub -all {TESTDIR} $d $tdir d
+ set outfile [open $destdir/include.tcl w]
+ puts $outfile $d
+ close $outfile
+
+ global svc_list
+ foreach svc_exe $svc_list {
+ if { [file exists $dir/$svc_exe] } {
+ catch {eval file copy $dir/$svc_exe $destdir}
+ }
+ }
+ }
+}
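+
+# For illustration (the include.tcl values shown are assumptions; only
+# the substitutions are taken from the code above): the regsubs turn
+# build-directory lines such as
+#
+#	set test_path ../test
+#	set testdir ./TESTDIR
+#
+# into the copy written to PARALLEL_TESTDIR.<i>:
+#
+#	set test_path ../../test
+#	set testdir ./TESTDIR.<i>
+#
+# so relative paths still resolve from one directory level deeper.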
+
+proc run_ptest { nprocs test args } {
+ global parms
+ global valid_methods
+ set basename ./PARALLEL_TESTDIR
+ set queuedir NULL
+ source ./include.tcl
+
+ mkparalleldirs $nprocs $basename $queuedir
+
+ if { [info exists parms($test)] } {
+ foreach method $valid_methods {
+ if { [eval exec_ptest $nprocs $basename \
+ $test $method $args] != 0 } {
+ break
+ }
+ }
+ } else {
+ eval exec_ptest $nprocs $basename $test $args
+ }
+}
+
+proc exec_ptest { nprocs basename test args } {
+ source ./include.tcl
+
+ set basedir [pwd]
+ set pidlist {}
+ puts "Running $nprocs parallel runs of $test"
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ set outf ALL.OUT.$i
+ fileremove -f $outf
+ set ret [catch {
+ set p [exec $tclsh_path << \
+ "cd $basename.$i;\
+ source ../$test_path/test.tcl;\
+ $test $args" >& $outf &]
+ lappend pidlist $p
+ set f [open $testdir/begin.$p w]
+ close $f
+ } res]
+ }
+ watch_procs $pidlist 30 36000
+ set failed 0
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ if { [check_output ALL.OUT.$i] == 1 } {
+ set failed 1
+ puts "Test $test failed in process $i."
+ }
+ }
+ if { $failed == 0 } {
+		puts "Test $test succeeded in all processes"
+ return 0
+ } else {
+ puts "Test failed: stopping"
+ return 1
+ }
+}
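+
+# Hypothetical invocation (illustration only): run_ptest runs one named
+# test concurrently in every parallel directory, iterating over all
+# access methods when the test takes method-specific parameters:
+#
+#	% run_ptest 4 test001
+#
+# Each process writes to its own ALL.OUT.<i>, which check_output then
+# scans for unexpected output.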
diff --git a/db-4.8.30/test/plat001.tcl b/db-4.8.30/test/plat001.tcl
new file mode 100644
index 0000000..5a60c8c
--- /dev/null
+++ b/db-4.8.30/test/plat001.tcl
@@ -0,0 +1,75 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST plat001
+# TEST
+# TEST Test of portability of sequences.
+# TEST
+# TEST Create and dump a database containing sequences. Save the dump.
+# TEST This test is used in conjunction with the upgrade tests, which
+# TEST will compare the saved dump to a locally created dump.
+
+proc plat001 { method {tnum "001"} args } {
+ source ./include.tcl
+ global fixed_len
+ global util_path
+
+ # Fixed_len must be increased from the default to
+ # accommodate fixed-record length methods.
+ set orig_fixed_len $fixed_len
+ set fixed_len 128
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set eindex [lsearch -exact $args "-env"]
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/plat$tnum.db
+ set testdump $testdir/plat$tnum.dmp
+ set env NULL
+ } else {
+ set testfile plat$tnum.db
+ set testdump plat$tnum.dmp
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ set rpcenv [is_rpcenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ cleanup $testdir $env
+
+ # Make the key numeric so we can test record-based methods.
+ set key 1
+
+ puts "\tPlat$tnum.a: Create $method db with a sequence."
+ set db [eval {berkdb_open -create -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set init 1
+ set min $init
+ set max 1000000000
+ set seq [eval {berkdb sequence} \
+ -create -init $init -min $min -max $max $db $key]
+ error_check_good is_valid_seq [is_valid_seq $seq] TRUE
+
+ error_check_good seq_close [$seq close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tPlat$tnum.b: Dump the db."
+ set stat [catch {eval {exec $util_path/db_dump} -f $testdump \
+ $testfile} ret]
+ error_check_good sequence_dump $stat 0
+
+ puts "\tPlat$tnum.c: Delete the db."
+ error_check_good db_delete [fileremove $testfile] ""
+
+ set fixed_len $orig_fixed_len
+ return
+}
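+
+# Note (sketch of the intended follow-up, per the TEST comment above):
+# the saved plat$tnum.dmp is meant to be compared by the upgrade tests
+# against a dump of a locally created database, conceptually something
+# like:
+#
+#	exec $util_path/db_dump -f current.dmp plat001.db
+#	error_check_good dumps_match [filecmp plat001.dmp current.dmp] 0
+#
+# (filecmp is the helper used elsewhere in this suite; the upgrade
+# tests' actual comparison may differ.)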
diff --git a/db-4.8.30/test/recd001.tcl b/db-4.8.30/test/recd001.tcl
new file mode 100644
index 0000000..2bdb1d7
--- /dev/null
+++ b/db-4.8.30/test/recd001.tcl
@@ -0,0 +1,258 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd001
+# TEST Per-operation recovery tests for non-duplicate, non-split
+# TEST messages. Makes sure that we exercise redo, undo, and do-nothing
+# TEST conditions. Any test that appears with the message (change state)
+# TEST indicates that we've already run the particular test, but we are
+# TEST running it again so that we can change the state of the data base
+# TEST to prepare for the next test (this applies to all other recovery
+# TEST tests as well).
+# TEST
+# TEST These are the most basic recovery tests. We do individual recovery
+# TEST tests for each operation in the access method interface. First we
+# TEST create a file and capture the state of the database (i.e., we copy
+# TEST it. Then we run a transaction containing a single operation. In
+# TEST one test, we abort the transaction and compare the outcome to the
+# TEST original copy of the file. In the second test, we restore the
+# TEST original copy of the database and then run recovery and compare
+# TEST this against the actual database.
+proc recd001 { method {select 0} args } {
+ global fixed_len
+ source ./include.tcl
+
+ # puts "$args"
+ set envargs ""
+ set zero_idx [lsearch -exact $args "-zero_log"]
+ if { $zero_idx != -1 } {
+ set args [lreplace $args $zero_idx $zero_idx]
+ set envargs "-zero_log"
+ }
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd001: $method operation/transaction tests ($envargs)"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ # The recovery tests were originally written to
+ # do a command, abort, do it again, commit, and then
+ # repeat the sequence with another command. Each command
+ # tends to require that the previous command succeeded and
+ # left the database a certain way. To avoid cluttering up the
+ # op_recover interface as well as the test code, we create two
+ # databases; one does abort and then commit for each op, the
+ # other does prepare, prepare-abort, and prepare-commit for each
+	# op. If all goes well, this allows each command to depend on
+	# exactly one successful iteration of the previous command.
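+	# Concretely, each {CMD MSG} pair in the list below is driven
+	# through the same fixed sequence of op_recover calls (a summary
+	# of the loop further down, not extra test steps):
+	#
+	#	abort, commit                          on $testfile
+	#	prepare-abort, prepare-discard,
+	#	    prepare-commit                     on $testfile2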
+ set testfile recd001.db
+ set testfile2 recd001-2.db
+
+ set flags "-create -txn -home $testdir $envargs"
+
+ # For queue databases, we end up locking all records from one
+ # to the end of the queue, which depends on the default pagesize.
+	# Assuming that the default page size is 16KB or less, we need up
+	# to 4K locks.
+ if { [is_record_based $method] == 1 } {
+ set flags "$flags -lock_max_locks 5000 -lock_max_objects 5000"
+ }
+
+ puts "\tRecd001.a.0: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ # puts "$oflags"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+	# (We cannot specify db truncate in a txn-protected env.)
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ error_check_good env_close [$dbenv close] 0
+
+ puts "\tRecd001.a.1: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {DB put -txn TXNID $key $data} "Recd001.b: put"}
+ { {DB del -txn TXNID $key} "Recd001.c: delete"}
+ { {DB put -txn TXNID $bigkey $data} "Recd001.d: big key put"}
+ { {DB del -txn TXNID $bigkey} "Recd001.e: big key delete"}
+ { {DB put -txn TXNID $key $bigdata} "Recd001.f: big data put"}
+ { {DB del -txn TXNID $key} "Recd001.g: big data delete"}
+ { {DB put -txn TXNID $key $data} "Recd001.h: put (change state)"}
+ { {DB put -txn TXNID $key $newdata} "Recd001.i: overwrite"}
+ { {DB put -txn TXNID -partial "$off $len" $key $partial_grow}
+ "Recd001.j: partial put growing"}
+ { {DB put -txn TXNID $key $newdata} "Recd001.k: overwrite (fix)"}
+ { {DB put -txn TXNID -partial "$off $len" $key $partial_shrink}
+ "Recd001.l: partial put shrinking"}
+ { {DB put -txn TXNID -append $data} "Recd001.m: put -append"}
+ { {DB get -txn TXNID -consume} "Recd001.n: db get -consume"}
+ }
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd001_key
+ }
+ set data recd001_data
+ set newdata NEWrecd001_dataNEW
+ set off 3
+ set len 12
+
+ set partial_grow replacement_record_grow
+ set partial_shrink xxx
+ if { [is_fixed_length $method] == 1 } {
+ set len [string length $partial_grow]
+ set partial_shrink $partial_grow
+ }
+ set bigdata [replicate $key $fixed_len]
+ if { [is_record_based $method] == 1 } {
+ set bigkey $fixed_len
+ } else {
+ set bigkey [replicate $key $fixed_len]
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+
+ if { [is_queue $method] != 1 } {
+ if { [string first append $cmd] != -1 } {
+ continue
+ }
+ if { [string first consume $cmd] != -1 } {
+ continue
+ }
+ }
+
+# if { [is_fixed_length $method] == 1 } {
+# if { [string first partial $cmd] != -1 } {
+# continue
+# }
+# }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg $args
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg $args
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg $args
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg $args
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg $args
+ }
+ set fixed_len $orig_fixed_len
+
+ if { [is_fixed_length $method] == 1 } {
+ puts "Skipping remainder of test for fixed length methods"
+ return
+ }
+
+ #
+	# Check partial extensions.  If we add a key/data pair to the
+	# database, expand it using -partial, and then recover, recovery
+	# used to fail (bug #3944).  Check that scenario here.
+ #
+ # !!!
+ # We loop here because on each iteration, we need to clean up
+ # the old env (i.e. this test does not depend on earlier runs).
+	# If we run it without cleaning up the env in between, we do not
+ # test the scenario of #3944.
+ #
+ set len [string length $data]
+ set len2 256
+ set part_data [replicate "abcdefgh" 32]
+ set p [list 0 $len]
+ set cmd [subst \
+ {DB put -txn TXNID -partial "$len $len2" $key $part_data}]
+ set msg "Recd001.o: partial put prepopulated/expanding"
+ foreach op {abort commit prepare-abort prepare-discard prepare-commit} {
+ env_cleanup $testdir
+
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+ set t [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $t $dbenv] TRUE
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -txn $t $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -txn $t $opts $testfile2"
+ set db2 [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db2] TRUE
+
+ set ret [$db put -txn $t -partial $p $key $data]
+ error_check_good dbput $ret 0
+
+ set ret [$db2 put -txn $t -partial $p $key $data]
+ error_check_good dbput $ret 0
+ error_check_good txncommit [$t commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbclose [$db2 close] 0
+ error_check_good dbenvclose [$dbenv close] 0
+
+ op_recover $op $testdir $env_cmd $testfile $cmd $msg $args
+ }
+ return
+}
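+
+# Usage sketch (illustration only): the optional select argument picks
+# individual sub-tests by the letter in their message tag, so e.g.
+#
+#	% recd001 btree {b c}
+#
+# runs only the "Recd001.b: put" and "Recd001.c: delete" cases.  The
+# same convention applies to the other recdNNN tests that take select.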
diff --git a/db-4.8.30/test/recd002.tcl b/db-4.8.30/test/recd002.tcl
new file mode 100644
index 0000000..6de75eb
--- /dev/null
+++ b/db-4.8.30/test/recd002.tcl
@@ -0,0 +1,108 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd002
+# TEST Split recovery tests. For every known split log message, makes sure
+# TEST that we exercise redo, undo, and do-nothing conditions.
+proc recd002 { method {select 0} args} {
+ source ./include.tcl
+ global rand_init
+
+ set envargs ""
+ set zero_idx [lsearch -exact $args "-zero_log"]
+ if { $zero_idx != -1 } {
+ set args [lreplace $args $zero_idx $zero_idx]
+ set envargs "-zero_log"
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd002: skipping for specific pagesizes"
+ return
+ }
+ berkdb srand $rand_init
+
+	# Queues don't do splits, so they don't really need the small page
+	# size; besides, the small page size is smaller than the record,
+	# which would be a problem.
+ if { [string compare $omethod "-queue"] == 0 } {
+ set pagesize 4096
+ } else {
+ set pagesize 512
+ }
+ puts "Recd002: $method split recovery tests ($envargs)"
+
+ env_cleanup $testdir
+ set testfile recd002.db
+ set testfile2 recd002-2.db
+ set eflags "-create -txn -lock_max_locks 2000 -home $testdir $envargs"
+
+ puts "\tRecd002.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases. We will use a small page size so that splits
+ # happen fairly quickly.
+ set oflags "-create $args $omethod -mode 0644 -env $dbenv\
+ -pagesize $pagesize $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags "-create $args $omethod -mode 0644 -env $dbenv\
+ -pagesize $pagesize $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # List of recovery tests: {CMD MSG} pairs
+ set slist {
+ { {populate DB $omethod TXNID $n 0 0} "Recd002.b: splits"}
+ { {unpopulate DB TXNID $r} "Recd002.c: Remove keys"}
+ }
+
+ # If pages are 512 bytes, then adding 512 key/data pairs
+ # should be more than sufficient.
+ set n 512
+ set r [expr $n / 2 ]
+ foreach pair $slist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg $args
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg $args
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg $args
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg $args
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg $args
+ }
+
+ puts "\tRecd002.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/db-4.8.30/test/recd003.tcl b/db-4.8.30/test/recd003.tcl
new file mode 100644
index 0000000..e3a33f6
--- /dev/null
+++ b/db-4.8.30/test/recd003.tcl
@@ -0,0 +1,125 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd003
+# TEST Duplicate recovery tests. For every known duplicate log message,
+# TEST makes sure that we exercise redo, undo, and do-nothing conditions.
+# TEST
+# TEST Test all the duplicate log messages and recovery operations. We make
+# TEST sure that we exercise all possible recovery actions: redo, undo, undo
+# TEST but no fix necessary, and redo but no fix necessary.
+proc recd003 { method {select 0} args } {
+ source ./include.tcl
+ global rand_init
+
+ set envargs ""
+ set zero_idx [lsearch -exact $args "-zero_log"]
+ if { $zero_idx != -1 } {
+ set args [lreplace $args $zero_idx $zero_idx]
+ set envargs "-zero_log"
+ }
+
+ set largs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Recd003 skipping for method $method"
+ return
+ }
+ puts "Recd003: $method duplicate recovery tests ($envargs)"
+
+ berkdb srand $rand_init
+
+ env_cleanup $testdir
+ # See comment in recd001.tcl for why there are two database files...
+ set testfile recd003.db
+ set testfile2 recd003-2.db
+ set eflags "-create -txn -home $testdir $envargs"
+
+ puts "\tRecd003.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases.
+ set oflags \
+ "-create $largs -mode 0644 $omethod -dup -env $dbenv $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags \
+ "-create $largs -mode 0644 $omethod -dup -env $dbenv $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+ set n 10
+ set dupn 2000
+ set bign 500
+
+ # List of recovery tests: {CMD MSG} pairs
+ set dlist {
+ { {populate DB $omethod TXNID $n 1 0}
+ "Recd003.b: add dups"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.c: remove dups all at once"}
+ { {populate DB $omethod TXNID $n 1 0}
+ "Recd003.d: add dups (change state)"}
+ { {unpopulate DB TXNID 0}
+ "Recd003.e: remove dups 1 at a time"}
+ { {populate DB $omethod TXNID $dupn 1 0}
+ "Recd003.f: dup split"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.g: remove dups (change state)"}
+ { {populate DB $omethod TXNID $n 1 1}
+ "Recd003.h: add big dup"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.i: remove big dup all at once"}
+ { {populate DB $omethod TXNID $n 1 1}
+ "Recd003.j: add big dup (change state)"}
+ { {unpopulate DB TXNID 0}
+ "Recd003.k: remove big dup 1 at a time"}
+ { {populate DB $omethod TXNID $bign 1 1}
+ "Recd003.l: split big dup"}
+ }
+
+ foreach pair $dlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg $largs
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg $largs
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg $largs
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg $largs
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg $largs
+ }
+
+ puts "\tRecd003.m: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/db-4.8.30/test/recd004.tcl b/db-4.8.30/test/recd004.tcl
new file mode 100644
index 0000000..fb9a3ae
--- /dev/null
+++ b/db-4.8.30/test/recd004.tcl
@@ -0,0 +1,103 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd004
+# TEST Big key test where big key gets elevated to internal page.
+proc recd004 { method {select 0} args } {
+ source ./include.tcl
+ global rand_init
+
+ set envargs ""
+ set zero_idx [lsearch -exact $args "-zero_log"]
+ if { $zero_idx != -1 } {
+ set args [lreplace $args $zero_idx $zero_idx]
+ set envargs "-zero_log"
+ }
+
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd004: skipping for specific pagesizes"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ puts "Recd004 skipping for method $method"
+ return
+ }
+ puts "Recd004: $method big-key on internal page recovery tests ($envargs)"
+
+ berkdb srand $rand_init
+
+ env_cleanup $testdir
+ set testfile recd004.db
+ set testfile2 recd004-2.db
+ set eflags "-create -txn -home $testdir $envargs"
+ puts "\tRecd004.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases. We will use a small page size so that we
+	# elevate keys to internal pages quickly.
+ set oflags "-create -mode 0644 \
+ $omethod -env $dbenv $opts -pagesize 512 $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags "-create -mode 0644 \
+ $omethod -env $dbenv $opts -pagesize 512 $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # List of recovery tests: {CMD MSG} pairs
+ set slist {
+ { {big_populate DB TXNID $n} "Recd004.b: big key elevation"}
+ { {unpopulate DB TXNID 0} "Recd004.c: Remove keys"}
+ }
+
+ # If pages are 512 bytes, then adding 512 key/data pairs
+ # should be more than sufficient.
+ set n 512
+ foreach pair $slist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort \
+ $testdir $env_cmd $testfile $cmd $msg $opts
+ op_recover commit \
+ $testdir $env_cmd $testfile $cmd $msg $opts
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg $opts
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg $opts
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg $opts
+ }
+
+ puts "\tRecd004.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/db-4.8.30/test/recd005.tcl b/db-4.8.30/test/recd005.tcl
new file mode 100644
index 0000000..4c5bc95
--- /dev/null
+++ b/db-4.8.30/test/recd005.tcl
@@ -0,0 +1,241 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd005
+# TEST Verify reuse of file ids works on catastrophic recovery.
+# TEST
+# TEST Make sure that we can do catastrophic recovery even if we open
+# TEST files using the same log file id.
+proc recd005 { method args } {
+ source ./include.tcl
+ global rand_init
+
+ set envargs ""
+ set zero_idx [lsearch -exact $args "-zero_log"]
+ if { $zero_idx != -1 } {
+ set args [lreplace $args $zero_idx $zero_idx]
+ set envargs "-zero_log"
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd005: $method catastrophic recovery ($envargs)"
+
+ berkdb srand $rand_init
+
+ set testfile1 recd005.1.db
+ set testfile2 recd005.2.db
+ set max_locks 2000
+ set eflags "-create -txn -lock_max_locks $max_locks \
+ -lock_max_objects $max_locks -home $testdir $envargs"
+
+ set tnum 0
+ foreach sizes "{1000 10} {10 1000}" {
+ foreach ops "{abort abort} {abort commit} {commit abort} \
+ {commit commit}" {
+ env_cleanup $testdir
+ incr tnum
+
+ set s1 [lindex $sizes 0]
+ set s2 [lindex $sizes 1]
+ set op1 [lindex $ops 0]
+ set op2 [lindex $ops 1]
+ puts "\tRecd005.$tnum: $s1 $s2 $op1 $op2"
+
+ puts "\tRecd005.$tnum.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the two databases.
+ set oflags \
+ "-create -mode 0644 -env $dbenv $args $omethod"
+ set db1 [eval {berkdb_open} $oflags $testfile1]
+ error_check_bad db_open $db1 NULL
+ error_check_good db_open [is_substr $db1 db] 1
+ error_check_good db_close [$db1 close] 0
+
+ set db2 [eval {berkdb_open} $oflags $testfile2]
+ error_check_bad db_open $db2 NULL
+ error_check_good db_open [is_substr $db2 db] 1
+ error_check_good db_close [$db2 close] 0
+ $dbenv close
+
+ set dbenv [eval $env_cmd]
+ puts "\tRecd005.$tnum.b: Populating databases"
+ eval {do_one_file $testdir \
+ $method $dbenv $env_cmd $testfile1 $s1 $op1 } $args
+ eval {do_one_file $testdir \
+ $method $dbenv $env_cmd $testfile2 $s2 $op2 } $args
+
+ puts "\tRecd005.$tnum.c: Verifying initial population"
+ eval {check_file \
+ $testdir $env_cmd $testfile1 $op1 } $args
+ eval {check_file \
+ $testdir $env_cmd $testfile2 $op2 } $args
+
+ # Now, close the environment (so that recovery will work
+ # on NT which won't allow delete of an open file).
+ reset_env $dbenv
+
+ berkdb debug_check
+ puts -nonewline \
+ "\tRecd005.$tnum.d: About to run recovery ... "
+ flush stdout
+
+ set stat [catch \
+ {exec $util_path/db_recover -h $testdir -c} \
+ result]
+ if { $stat == 1 } {
+ error "Recovery error: $result."
+ }
+ puts "complete"
+
+ # Substitute a file that will need recovery and try
+ # running recovery again.
+ if { $op1 == "abort" } {
+ file copy -force $testdir/$testfile1.afterop \
+ $testdir/$testfile1
+ move_file_extent $testdir $testfile1 \
+ afterop copy
+ } else {
+ file copy -force $testdir/$testfile1.init \
+ $testdir/$testfile1
+ move_file_extent $testdir $testfile1 init copy
+ }
+ if { $op2 == "abort" } {
+ file copy -force $testdir/$testfile2.afterop \
+ $testdir/$testfile2
+ move_file_extent $testdir $testfile2 \
+ afterop copy
+ } else {
+ file copy -force $testdir/$testfile2.init \
+ $testdir/$testfile2
+ move_file_extent $testdir $testfile2 init copy
+ }
+
+ berkdb debug_check
+ puts -nonewline "\tRecd005.$tnum.e:\
+ About to run recovery on pre-op database ... "
+ flush stdout
+
+ set stat \
+ [catch {exec $util_path/db_recover \
+ -h $testdir -c} result]
+ if { $stat == 1 } {
+ error "Recovery error: $result."
+ }
+ puts "complete"
+
+ set dbenv [eval $env_cmd]
+ eval {check_file \
+ $testdir $env_cmd $testfile1 $op1 } $args
+ eval {check_file \
+ $testdir $env_cmd $testfile2 $op2 } $args
+ reset_env $dbenv
+
+ puts "\tRecd005.$tnum.f:\
+ Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch \
+ {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+ }
+ }
+}
+
+proc do_one_file { dir method env env_cmd filename num op args} {
+ source ./include.tcl
+
+ set init_file $dir/$filename.t1
+ set afterop_file $dir/$filename.t2
+ set final_file $dir/$filename.t3
+
+ # Save the initial file and open the environment and the first file
+ file copy -force $dir/$filename $dir/$filename.init
+ copy_extent_file $dir $filename init
+ set oflags "-auto_commit -unknown -env $env"
+ set db [eval {berkdb_open} $oflags $args $filename]
+
+ # Dump out file contents for initial case
+ eval open_and_dump_file $filename $env $init_file nop \
+ dump_file_direction "-first" "-next" $args
+
+ set txn [$env txn]
+ error_check_bad txn_begin $txn NULL
+ error_check_good txn_begin [is_substr $txn $env] 1
+
+ # Now fill in the db and the txnid in the command
+ populate $db $method $txn $num 0 0
+
+ # Sync the file so that we can capture a snapshot to test
+ # recovery.
+ error_check_good sync:$db [$db sync] 0
+ file copy -force $dir/$filename $dir/$filename.afterop
+ copy_extent_file $dir $filename afterop
+ eval open_and_dump_file $testdir/$filename.afterop NULL \
+ $afterop_file nop dump_file_direction "-first" "-next" $args
+ error_check_good txn_$op:$txn [$txn $op] 0
+
+ if { $op == "commit" } {
+ puts "\t\tFile $filename executed and committed."
+ } else {
+ puts "\t\tFile $filename executed and aborted."
+ }
+
+ # Dump out file and save a copy.
+ error_check_good sync:$db [$db sync] 0
+ eval open_and_dump_file $testdir/$filename NULL $final_file nop \
+ dump_file_direction "-first" "-next" $args
+ file copy -force $dir/$filename $dir/$filename.final
+ copy_extent_file $dir $filename final
+
+ # If this is an abort, it should match the original file.
+ # If this was a commit, then this file should match the
+ # afterop file.
+ if { $op == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ }
+
+ error_check_good close:$db [$db close] 0
+}
+
+proc check_file { dir env_cmd filename op args} {
+ source ./include.tcl
+
+ set init_file $dir/$filename.t1
+ set afterop_file $dir/$filename.t2
+ set final_file $dir/$filename.t3
+
+ eval open_and_dump_file $testdir/$filename NULL $final_file nop \
+ dump_file_direction "-first" "-next" $args
+ if { $op == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(pre-commit,post-$op):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ }
+}
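+
+# Snapshot naming used by the two helpers above (a recap of the code,
+# for orientation):
+#	$filename.init      copy taken before the transactional workload
+#	$filename.afterop   copy taken after the operation, before commit/abort
+#	$filename.final     copy taken after the commit or abort
+#	$filename.t1/t2/t3  the corresponding key/data dumps that
+#	                    filesort and filecmp actually compare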
diff --git a/db-4.8.30/test/recd006.tcl b/db-4.8.30/test/recd006.tcl
new file mode 100644
index 0000000..c5b0981
--- /dev/null
+++ b/db-4.8.30/test/recd006.tcl
@@ -0,0 +1,268 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd006
+# TEST Nested transactions.
+proc recd006 { method {select 0} args } {
+ global kvals
+ source ./include.tcl
+
+ set envargs ""
+ set zero_idx [lsearch -exact $args "-zero_log"]
+ if { $zero_idx != -1 } {
+ set args [lreplace $args $zero_idx $zero_idx]
+ set envargs "-zero_log"
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Recd006 skipping for method $method"
+ return
+ }
+ puts "Recd006: $method nested transactions ($envargs)"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set dbfile recd006.db
+ set testfile $testdir/$dbfile
+
+ puts "\tRecd006.a: create database"
+ set oflags "-create $args $omethod $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Make sure that we have enough entries to span a couple of
+ # different pages.
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < 1000 } {
+ if { [string compare $omethod "-recno"] == 0 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ set ret [$db put -nooverwrite $key $str]
+ error_check_good put $ret 0
+
+ incr count
+ }
+ close $did
+
+ # Variables used below:
+ # p1: a pair of keys that are likely to be on the same page.
+ # p2: a pair of keys that are likely to be on the same page,
+ # but on a page different than those in p1.
+ set dbc [$db cursor]
+ error_check_good dbc [is_substr $dbc $db] 1
+
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:DB_FIRST [llength $ret] 0
+ set p1 [lindex [lindex $ret 0] 0]
+ set kvals($p1) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:DB_NEXT [llength $ret] 0
+ lappend p1 [lindex [lindex $ret 0] 0]
+ set kvals([lindex [lindex $ret 0] 0]) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -last]
+ error_check_bad dbc_get:DB_LAST [llength $ret] 0
+ set p2 [lindex [lindex $ret 0] 0]
+ set kvals($p2) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:DB_PREV [llength $ret] 0
+ lappend p2 [lindex [lindex $ret 0] 0]
+ set kvals([lindex [lindex $ret 0] 0]) [lindex [lindex $ret 0] 1]
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ # Now create the full transaction environment.
+ set eflags "-create -txn -home $testdir"
+
+ puts "\tRecd006.b: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Reset the environment.
+ reset_env $dbenv
+
+ set p1 [list $p1]
+ set p2 [list $p2]
+
+ # List of recovery tests: {CMD MSG} pairs
+ set rlist {
+ { {nesttest DB TXNID ENV 1 $p1 $p2 commit commit}
+ "Recd006.c: children (commit commit)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 commit commit}
+ "Recd006.d: children (commit commit)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 commit abort}
+ "Recd006.e: children (commit abort)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 commit abort}
+ "Recd006.f: children (commit abort)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 abort abort}
+ "Recd006.g: children (abort abort)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 abort abort}
+ "Recd006.h: children (abort abort)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 abort commit}
+ "Recd006.i: children (abort commit)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 abort commit}
+ "Recd006.j: children (abort commit)"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $dbfile $cmd $msg $args
+ op_recover commit $testdir $env_cmd $dbfile $cmd $msg $args
+ }
+
+ puts "\tRecd006.k: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+# Do the nested transaction test.
+# We want to make sure that children inherit properly from their
+# parents and that locks are properly handed back to parents
+# and that the right thing happens on commit/abort.
+# In particular:
+# Write lock on parent, properly acquired by child.
+# Committed operation on child gives lock to parent so that
+# other child can also get the lock.
+# Aborted op by child releases lock so other child can get it.
+# Correct database state if child commits
+# Correct database state if child aborts
+proc nesttest { db parent env do p1 p2 child1 child2} {
+ global kvals
+ source ./include.tcl
+
+ if { $do == 1 } {
+ set func toupper
+ } else {
+ set func tolower
+ }
+
+ # Do an RMW on the parent to get a write lock.
+ set p10 [lindex $p1 0]
+ set p11 [lindex $p1 1]
+ set p20 [lindex $p2 0]
+ set p21 [lindex $p2 1]
+
+ set ret [$db get -rmw -txn $parent $p10]
+ set res $ret
+ set Dret [lindex [lindex $ret 0] 1]
+ if { [string compare $Dret $kvals($p10)] == 0 ||
+ [string compare $Dret [string toupper $kvals($p10)]] == 0 } {
+ set val 0
+ } else {
+ set val $Dret
+ }
+ error_check_good get_parent_RMW $val 0
+
+ # OK, do child 1
+ set kid1 [$env txn -parent $parent]
+ error_check_good kid1 [is_valid_txn $kid1 $env] TRUE
+
+ # Reading write-locked parent object should be OK
+ #puts "\tRead write-locked parent object for kid1."
+ set ret [$db get -txn $kid1 $p10]
+ error_check_good kid1_get10 $ret $res
+
+ # Now update this child
+ set data [lindex [lindex [string $func $ret] 0] 1]
+ set ret [$db put -txn $kid1 $p10 $data]
+ error_check_good kid1_put10 $ret 0
+
+ #puts "\tKid1 successful put."
+
+ # Now start child2
+ #puts "\tBegin txn for kid2."
+ set kid2 [$env txn -parent $parent]
+ error_check_good kid2 [is_valid_txn $kid2 $env] TRUE
+
+ # Getting anything in the p1 set should deadlock, so let's
+ # work on the p2 set.
+ set data [string $func $kvals($p20)]
+ #puts "\tPut data for kid2."
+ set ret [$db put -txn $kid2 $p20 $data]
+ error_check_good kid2_put20 $ret 0
+
+ #puts "\tKid2 data put successful."
+
+ # Now let's do the right thing to kid1
+ puts -nonewline "\tKid1 $child1..."
+ if { [string compare $child1 "commit"] == 0 } {
+ error_check_good kid1_commit [$kid1 commit] 0
+ } else {
+ error_check_good kid1_abort [$kid1 abort] 0
+ }
+ puts "complete"
+
+ # In either case, child2 should now be able to get the
+ # lock, either because it is inherited by the parent
+ # (commit) or because it was released (abort).
+ set data [string $func $kvals($p11)]
+ set ret [$db put -txn $kid2 $p11 $data]
+ error_check_good kid2_put11 $ret 0
+
+ # Now let's do the right thing to kid2
+ puts -nonewline "\tKid2 $child2..."
+ if { [string compare $child2 "commit"] == 0 } {
+ error_check_good kid2_commit [$kid2 commit] 0
+ } else {
+ error_check_good kid2_abort [$kid2 abort] 0
+ }
+ puts "complete"
+
+ # Now, let parent check that the right things happened.
+ # First get all four values
+ set p10_check [lindex [lindex [$db get -txn $parent $p10] 0] 0]
+ set p11_check [lindex [lindex [$db get -txn $parent $p11] 0] 0]
+ set p20_check [lindex [lindex [$db get -txn $parent $p20] 0] 0]
+ set p21_check [lindex [lindex [$db get -txn $parent $p21] 0] 0]
+
+ if { [string compare $child1 "commit"] == 0 } {
+ error_check_good parent_kid1 $p10_check \
+ [string tolower [string $func $kvals($p10)]]
+ } else {
+ error_check_good \
+ parent_kid1 $p10_check [string tolower $kvals($p10)]
+ }
+ if { [string compare $child2 "commit"] == 0 } {
+ error_check_good parent_kid2 $p11_check \
+ [string tolower [string $func $kvals($p11)]]
+ error_check_good parent_kid2 $p20_check \
+ [string tolower [string $func $kvals($p20)]]
+ } else {
+ error_check_good parent_kid2 $p11_check $kvals($p11)
+ error_check_good parent_kid2 $p20_check $kvals($p20)
+ }
+
+	# Now do a write on the parent for p21, whose lock it should
+	# either already hold or be able to acquire.
+ set ret [$db put -txn $parent $p21 [string $func $kvals($p21)]]
+ error_check_good parent_put21 $ret 0
+
+ return 0
+}
diff --git a/db-4.8.30/test/recd007.tcl b/db-4.8.30/test/recd007.tcl
new file mode 100644
index 0000000..8c30306
--- /dev/null
+++ b/db-4.8.30/test/recd007.tcl
@@ -0,0 +1,1069 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd007
+# TEST File create/delete tests.
+# TEST
+# TEST This is a recovery test for create/delete of databases. We have
+# TEST hooks in the database so that we can abort the process at various
+# TEST points and make sure that the transaction doesn't commit. We
+# TEST then need to recover and make sure the file correctly exists
+# TEST or does not, as the case may be.
+proc recd007 { method args } {
+ global fixed_len
+ source ./include.tcl
+
+ env_cleanup $testdir
+ set envargs ""
+ set data_dir ""
+ set dir_cmd ""
+ set zero_idx [lsearch -exact $args "-zero_log"]
+ if { $zero_idx != -1 } {
+ set args [lreplace $args $zero_idx $zero_idx]
+ set envargs "-zero_log"
+ }
+ set zero_idx [lsearch -exact $args "-data_dir"]
+ if { $zero_idx != -1 } {
+ set end [expr $zero_idx + 1]
+ append envargs [lrange $args $zero_idx $end]
+ set data_dir [lrange $args $end $end]
+ set dir_cmd "if {\[file exists $testdir/$data_dir] == 0 } {exec mkdir $testdir/$data_dir} ; "
+ set args [lreplace $args $zero_idx $end]
+ }
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd007: $method operation/transaction tests ($envargs)"
+
+ # Create the database and environment.
+
+ set testfile recd007.db
+ set flags "-create -txn -home $testdir $envargs"
+
+ puts "\tRecd007.a: creating environment"
+ set env_cmd "$dir_cmd berkdb_env $flags"
+
+ set env [eval $env_cmd]
+
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 -env $env $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $env $testfile] 0
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good envclose [$env close] 0
+
+ # Convert the args again because fixed_len is now real.
+ set opts [convert_args $method $args]
+ set save_opts $opts
+ set moreopts {" -lorder 1234 " " -lorder 1234 -chksum " \
+ " -lorder 4321 " " -lorder 4321 -chksum "}
+
+ # List of recovery tests: {HOOKS MSG} pairs
+ # Where each HOOK is a list of {COPY ABORT}
+ #
+ set rlist {
+ { {"none" "preopen"} "Recd007.b0: none/preopen"}
+ { {"none" "postopen"} "Recd007.b1: none/postopen"}
+ { {"none" "postlogmeta"} "Recd007.b2: none/postlogmeta"}
+ { {"none" "postlog"} "Recd007.b3: none/postlog"}
+ { {"none" "postsync"} "Recd007.b4: none/postsync"}
+ { {"postopen" "none"} "Recd007.c0: postopen/none"}
+ { {"postlogmeta" "none"} "Recd007.c1: postlogmeta/none"}
+ { {"postlog" "none"} "Recd007.c2: postlog/none"}
+ { {"postsync" "none"} "Recd007.c3: postsync/none"}
+ { {"postopen" "postopen"} "Recd007.d: postopen/postopen"}
+ { {"postopen" "postlogmeta"} "Recd007.e: postopen/postlogmeta"}
+ { {"postopen" "postlog"} "Recd007.f: postopen/postlog"}
+ { {"postlog" "postlog"} "Recd007.g: postlog/postlog"}
+ { {"postlogmeta" "postlogmeta"} "Recd007.h: postlogmeta/postlogmeta"}
+ { {"postlogmeta" "postlog"} "Recd007.i: postlogmeta/postlog"}
+ { {"postlog" "postsync"} "Recd007.j: postlog/postsync"}
+ { {"postsync" "postsync"} "Recd007.k: postsync/postsync"}
+ }
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ #
+ # Run natively
+ #
+ file_recover_create $testdir $env_cmd $omethod \
+ $save_opts $testfile $cmd $msg $data_dir
+ foreach o $moreopts {
+ set opts $save_opts
+ append opts $o
+ file_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile $cmd $msg $data_dir
+ }
+ }
+
+ set rlist {
+ { {"none" "predestroy"} "Recd007.l0: none/predestroy"}
+ { {"none" "postdestroy"} "Recd007.l1: none/postdestroy"}
+ { {"predestroy" "none"} "Recd007.m0: predestroy/none"}
+ { {"postdestroy" "none"} "Recd007.m1: postdestroy/none"}
+ { {"predestroy" "predestroy"} "Recd007.n: predestroy/predestroy"}
+ { {"predestroy" "postdestroy"} "Recd007.o: predestroy/postdestroy"}
+ { {"postdestroy" "postdestroy"} "Recd007.p: postdestroy/postdestroy"}
+ }
+
+ foreach op { dbremove dbrename dbtruncate } {
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ file_recover_delete $testdir $env_cmd $omethod \
+ $save_opts $testfile $cmd $msg $op $data_dir
+ foreach o $moreopts {
+ set opts $save_opts
+ append opts $o
+ file_recover_delete $testdir $env_cmd $omethod \
+ $opts $testfile $cmd $msg $op $data_dir
+ }
+ }
+ }
+
+ if { $is_windows_test != 1 && $is_hp_test != 1 } {
+ set env_cmd "$dir_cmd ; berkdb_env_noerr $flags"
+ do_file_recover_delmk $testdir $env_cmd $method $opts $testfile $data_dir
+ }
+
+ puts "\tRecd007.r: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+ set fixed_len $orig_fixed_len
+ return
+}
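+
+# Note (summary of the hook mechanism exercised below, not new
+# behavior): each {COPY ABORT} pair is applied with "$env test copy
+# <loc>" and "$env test abort <loc>"; the library snapshots the file
+# at the copy location and makes the operation fail at the abort
+# location, while "none" disables the respective hook.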
+
+proc file_recover_create { dir env_cmd method opts dbfile cmd msg data_dir} {
+ #
+ # We run this test on each of these scenarios:
+ # 1. Creating just a database
+ # 2. Creating a database with a subdb
+ # 3. Creating a 2nd subdb in a database
+ puts "\t$msg ($opts) create with a database"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 0 $cmd $msg $data_dir
+ if { [is_queue $method] == 1 || [is_partitioned $opts] == 1} {
+ puts "\tSkipping subdatabase tests for method $method"
+ return
+ }
+ puts "\t$msg ($opts) create with a database and subdb"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 1 $cmd $msg $data_dir
+ puts "\t$msg ($opts) create with a database and 2nd subdb"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 2 $cmd $msg $data_dir
+
+}
+
+proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg data_dir} {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ set dflags "-dar"
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+ set copy [lindex $cmd 0]
+ set abort [lindex $cmd 1]
+ error_check_good copy_location [is_valid_create_loc $copy] 1
+ error_check_good abort_location [is_valid_create_loc $abort] 1
+
+ if {([string first "logmeta" $copy] != -1 || \
+ [string first "logmeta" $abort] != -1) && \
+ [is_btree $method] == 0 } {
+ puts "\tSkipping for method $method"
+ $env test copy none
+ $env test abort none
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good env_close [$env close] 0
+ return
+ }
+
+ # Basically non-existence is our initial state. When we
+ # abort, it is also our final state.
+ #
+ switch $sub {
+ 0 {
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile"
+ }
+ 1 {
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile sub0"
+ }
+ 2 {
+ #
+ # If we are aborting here, then we need to
+ # create a first subdb, then create a second
+ #
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile sub0"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+ set init_file $dir/$data_dir/$dbfile.init
+ catch { file copy -force $dir/$data_dir/$dbfile $init_file } res
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile sub1"
+ }
+ default {
+ puts "\tBad value $sub for sub"
+ return
+ }
+ }
+ #
+ # Set our locations to copy and abort
+ #
+ set ret [eval $env test copy $copy]
+ error_check_good test_copy $ret 0
+ set ret [eval $env test abort $abort]
+ error_check_good test_abort $ret 0
+
+ puts "\t\tExecuting command"
+ set ret [catch {eval {berkdb_open} $oflags} db]
+
+ # Sync the mpool so any changes to the file that are
+ # in mpool get written to the disk file before the
+ # diff.
+ $env mpool_sync
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file created.
+ #
+ if {[string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and aborted."
+		error_check_bad db_open $ret 0
+
+ #
+ # Check that the file does not exist. Final state.
+ #
+ if { $sub != 2 } {
+ error_check_good db_open:exists \
+ [file exists $dir/$data_dir/$dbfile] 0
+ } else {
+ error_check_good \
+ diff(init,postcreate):diff($init_file,$dir/$data_dir/$dbfile)\
+ [dbdump_diff $dflags $init_file $dir $data_dir/$dbfile] 0
+ }
+ } else {
+ #
+ # Operation was committed, verify it exists.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ #
+ # Check that the file exists.
+ #
+ error_check_good db_open [file exists $dir/$data_dir/$dbfile] 1
+ set init_file $dir/$data_dir/$dbfile.init
+ catch { file copy -force $dir/$data_dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1 || [is_partitioned $opts] == 1} {
+ copy_extent_file $dir/$data_dir $dbfile init
+ }
+ }
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+ # the file still doesn't exist or change (depending on sub)
+ # when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set env [eval $env_cmd -recover_fatal]
+ error_check_good env_close [$env close] 0
+# set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+# if { $stat == 1 } {
+# error "FAIL: Recovery error: $result."
+# return
+# }
+ puts "complete"
+ if { $sub != 2 && [string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it still does
+ # not exist. Only done with file creations.
+ #
+ error_check_good after_recover1 [file exists $dir/$data_dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed or just a subdb was aborted.
+ # Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$data_dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $data_dir/$dbfile] 0
+ #
+ # Need a new copy to get the right LSN into the file.
+ #
+ catch { file copy -force $dir/$data_dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1 || [is_partitioned $opts] == 1 } {
+ copy_extent_file $dir/$data_dir $dbfile init
+ }
+ }
+
+ # If we didn't make a copy, then we are done.
+ #
+ if {[string first "none" $copy] != -1} {
+ return
+ }
+
+ #
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ copy_afterop $dir
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set env [eval $env_cmd -recover_fatal]
+ error_check_good env_close [$env close] 0
+# set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+# if { $stat == 1 } {
+# error "FAIL: Recovery error: $result."
+# return
+# }
+ puts "complete"
+ if { $sub != 2 && [string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it still does
+ # not exist. Only done with file creations.
+ #
+ error_check_good after_recover2 [file exists $dir/$data_dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed or just a subdb was aborted.
+ # Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$data_dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $data_dir/$dbfile] 0
+ }
+
+}
+
+proc file_recover_delete { dir env_cmd method opts dbfile cmd msg op data_dir} {
+ #
+ # We run this test on each of these scenarios:
+ # 1. Deleting/Renaming just a database
+ # 2. Deleting/Renaming a database with a subdb
+ # 3. Deleting/Renaming a 2nd subdb in a database
+ puts "\t$msg $op ($opts) with a database"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 0 $cmd $msg $op $data_dir
+ if { [is_queue $method] == 1 || [is_partitioned $opts] == 1} {
+ puts "\tSkipping subdatabase tests for method $method"
+ return
+ }
+ puts "\t$msg $op ($opts) with a database and subdb"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 1 $cmd $msg $op $data_dir
+ puts "\t$msg $op ($opts) with a database and 2nd subdb"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 2 $cmd $msg $op $data_dir
+
+}
+
+proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op data_dir} {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+ set copy [lindex $cmd 0]
+ set abort [lindex $cmd 1]
+ error_check_good copy_location [is_valid_delete_loc $copy] 1
+ error_check_good abort_location [is_valid_delete_loc $abort] 1
+
+ if { [is_record_based $method] == 1 } {
+ set key1 1
+ set key2 2
+ } else {
+ set key1 recd007_key1
+ set key2 recd007_key2
+ }
+ set data1 recd007_data0
+ set data2 recd007_data1
+ set data3 NEWrecd007_data2
+
+ #
+ # Depending on what sort of subdb we want, if any, our
+ # args to the open call will be different (and if we
+ # want a 2nd subdb, we create the first here.
+ #
+ # XXX
+	# For dbtruncate, we want oflags to contain the literal string
+	# "$env", not the value currently in 'env'.  That is why the
+	# '$' is protected below.  Later on we use oflags with a new
+	# $env that we have just opened.
+ #
+ switch $sub {
+ 0 {
+ set subdb ""
+ set new $dbfile.new
+ set dflags "-dar"
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile"
+ }
+ 1 {
+ set subdb sub0
+ set new $subdb.new
+ set dflags ""
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile $subdb"
+ }
+ 2 {
+ #
+ # If we are aborting here, then we need to
+ # create a first subdb, then create a second
+ #
+ set subdb sub1
+ set new $subdb.new
+ set dflags ""
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile sub0"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key1 $data1]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile $subdb"
+ }
+ default {
+ puts "\tBad value $sub for sub"
+ return
+ }
+ }
+
+ #
+ # Set our locations to copy and abort
+ #
+ set ret [eval $env test copy $copy]
+ error_check_good test_copy $ret 0
+ set ret [eval $env test abort $abort]
+ error_check_good test_abort $ret 0
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key1 $data1]
+ error_check_good db_put $ret 0
+ set ret [$db put -txn $txn $key2 $data2]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ $env mpool_sync
+
+ set init_file $dir/$data_dir/$dbfile.init
+ catch { file copy -force $dir/$data_dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1 || [is_partitioned $opts] == 1} {
+ copy_extent_file $dir/$data_dir $dbfile init
+ }
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file removed.
+ #
+ switch $op {
+ "dbrename" {
+ set ret [catch { eval {berkdb} $op -env $env -auto_commit \
+ $dbfile $subdb $new } remret]
+ }
+ "dbremove" {
+ set ret [catch { eval {berkdb} $op -env $env -auto_commit \
+ $dbfile $subdb } remret]
+ }
+ "dbtruncate" {
+ set txn [$env txn]
+ set db [eval {berkdb_open_noerr -env} \
+ $env -auto_commit $opts $dbfile $subdb]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+ set ret [catch {$db truncate -txn $txn} remret]
+ }
+ }
+ $env mpool_sync
+ if { $abort == "none" } {
+ if { $op == "dbtruncate" } {
+ error_check_good txncommit [$txn commit] 0
+ error_check_good dbclose [$db close] 0
+ }
+ #
+ # Operation was committed, verify it.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good $op $ret 0
+ #
+ # If a dbtruncate, check that truncate returned the number
+ # of items previously in the database.
+ #
+ if { [string compare $op "dbtruncate"] == 0 } {
+ error_check_good remret $remret 2
+ }
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags $data_dir
+ } else {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ if { $op == "dbtruncate" } {
+ error_check_good txnabort [$txn abort] 0
+ error_check_good dbclose [$db close] 0
+ }
+ puts "\t\tCommand executed and aborted."
+ error_check_good $op $ret 1
+
+ #
+ # Check that the file exists. Final state.
+ # Compare against initial file.
+ #
+ error_check_good post$op.1 [file exists $dir/$data_dir/$dbfile] 1
+ error_check_good \
+ diff(init,post$op.2):diff($init_file,$dir/$data_dir/$dbfile)\
+ [dbdump_diff $dflags $init_file $dir $data_dir/$dbfile] 0
+ }
+ $env mpool_sync
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good env_close [$env close] 0
+ catch { file copy -force $dir/$data_dir/$dbfile $init_file } res
+ if { [is_queue $method] == 1 || [is_partitioned $opts] == 1} {
+ copy_extent_file $dir/$data_dir $dbfile init
+ }
+
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+ # the file still doesn't exist or change (depending on abort)
+ # when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set env [eval $env_cmd -recover_fatal]
+ error_check_good env_close [$env close] 0
+# set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+# if { $stat == 1 } {
+# error "FAIL: Recovery error: $result."
+# return
+# }
+
+ puts "complete"
+
+ if { $abort == "none" } {
+ #
+		# Operation was committed.
+ #
+ set env [eval $env_cmd]
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags $data_dir
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good env_close [$env close] 0
+ } else {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ berkdb debug_check
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$data_dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $data_dir/$dbfile] 0
+ }
+
+ #
+ # If we didn't make a copy, then we are done.
+ #
+ if {[string first "none" $copy] != -1} {
+ return
+ }
+
+ #
+ # Now restore the .afterop file(s) to their original name.
+ # Run recovery again.
+ #
+ copy_afterop $dir
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set env [eval $env_cmd -recover_fatal]
+ error_check_good env_close [$env close] 0
+# set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+# if { $stat == 1 } {
+# error "FAIL: Recovery error: $result."
+# return
+# }
+ puts "complete"
+
+ if { [string first "none" $abort] != -1} {
+ set env [eval $env_cmd]
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags $data_dir
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good env_close [$env close] 0
+ } else {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$data_dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $data_dir/$dbfile] 0
+ }
+
+}
+
+#
+# This function tests a specific case of recovering after a db removal.
+# This is for SR #2538.  The basic steps are:
+# - Make an env.
+# - Make/close a db.
+# - Remove the db.
+# - Create another db of same name.
+# - Sync db but leave open.
+# - Run recovery.
+# - Verify no recovery errors and that new db is there.
+proc do_file_recover_delmk { dir env_cmd method opts dbfile data_dir} {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+ set omethod [convert_method $method]
+
+ puts "\tRecd007.q: Delete and recreate a database"
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd007_key
+ }
+ set data1 recd007_data
+ set data2 NEWrecd007_data2
+ set data3 LASTrecd007_data3
+
+ set oflags \
+ "-create $omethod -auto_commit -mode 0644 $opts $dbfile"
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open_noerr} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key $data1]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ file copy -force $testdir/$data_dir/$dbfile $testdir/$data_dir/${dbfile}.1
+
+ set ret \
+ [catch { berkdb dbremove -env $env -auto_commit $dbfile } remret]
+
+ #
+ # Operation was committed, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good dbremove $ret 0
+ error_check_good dbremove.1 [file exists $dir/$data_dir/$dbfile] 0
+
+ #
+ # Now create a new db with the same name.
+ #
+ set db [eval {berkdb_open_noerr} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key [chop_data $method $data2]]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_sync [$db sync] 0
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set envr [eval $env_cmd -recover_fatal]
+ error_check_good env_close [$envr close] 0
+# set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+# if { $stat == 1 } {
+# error "FAIL: Recovery error: $result."
+# return
+# }
+ puts "complete"
+# error_check_good db_recover $stat 0
+ error_check_good file_exist [file exists $dir/$data_dir/$dbfile] 1
+ #
+ # Since we ran recovery on the open db/env, we need to
+ # catch these calls. Basically they are there to clean
+ # up the Tcl widgets.
+ #
+ set stat [catch {$db close} ret]
+ error_check_bad dbclose_after_remove $stat 0
+ error_check_good dbclose_after_remove [is_substr $ret recovery] 1
+ set stat [catch {$env log_flush} ret]
+ set stat [catch {$env close} ret]
+ error_check_bad envclose_after_remove $stat 0
+ error_check_good envclose_after_remove [is_substr $ret recovery] 1
+
+ #
+ # Reopen env and db and verify 2nd database is there.
+ #
+ set env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
+ set db [eval {berkdb_open} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set ret [$db get $key]
+ error_check_good dbget [llength $ret] 1
+ set kd [lindex $ret 0]
+ error_check_good key [lindex $kd 0] $key
+ error_check_good data2 [lindex $kd 1] [pad_data $method $data2]
+
+ error_check_good dbclose [$db close] 0
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good envclose [$env close] 0
+
+ #
+ # Copy back the original database and run recovery again.
+ # SR [#13026]
+ #
+ puts "\t\tRecover from first database"
+ file copy -force $testdir/$data_dir/${dbfile}.1 $testdir/$data_dir/$dbfile
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set env [eval $env_cmd -recover_fatal]
+ error_check_good env_close [$env close] 0
+# set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+# if { $stat == 1 } {
+# error "FAIL: Recovery error: $result."
+# return
+# }
+ puts "complete"
+# error_check_good db_recover $stat 0
+ error_check_good db_recover.1 [file exists $dir/$data_dir/$dbfile] 1
+
+ #
+ # Reopen env and db and verify 2nd database is there.
+ #
+ set env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
+ set db [eval {berkdb_open_noerr} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set ret [$db get $key]
+ error_check_good dbget [llength $ret] 1
+ set kd [lindex $ret 0]
+ error_check_good key [lindex $kd 0] $key
+ error_check_good data2 [lindex $kd 1] [pad_data $method $data2]
+
+ error_check_good dbclose [$db close] 0
+
+ file copy -force $testdir/$data_dir/$dbfile $testdir/$data_dir/${dbfile}.2
+
+ puts "\t\tRemove second db"
+ set ret \
+ [catch { berkdb dbremove -env $env -auto_commit $dbfile } remret]
+
+ #
+ # Operation was committed, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good dbremove $ret 0
+ error_check_good dbremove.2 [file exists $dir/$data_dir/$dbfile] 0
+
+ #
+ # Now create a new db with the same name.
+ #
+ puts "\t\tAdd a third version of the database"
+ set db [eval {berkdb_open_noerr} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key [chop_data $method $data3]]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_sync [$db sync] 0
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set envr [eval $env_cmd -recover_fatal]
+ error_check_good env_close [$envr close] 0
+# set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+# if { $stat == 1 } {
+# error "FAIL: Recovery error: $result."
+# return
+# }
+ puts "complete"
+# error_check_good db_recover $stat 0
+ error_check_good file_exist [file exists $dir/$data_dir/$dbfile] 1
+
+ #
+ # Since we ran recovery on the open db/env, we need to
+ # catch these calls to clean up the Tcl widgets.
+ #
+ set stat [catch {$db close} ret]
+ error_check_bad dbclose_after_remove $stat 0
+ error_check_good dbclose_after_remove [is_substr $ret recovery] 1
+ set stat [catch {$env log_flush} ret]
+ set stat [catch {$env close} ret]
+ error_check_bad envclose_after_remove $stat 0
+ error_check_good envclose_after_remove [is_substr $ret recovery] 1
+
+ #
+ # Copy back the second database and run recovery again.
+ #
+ puts "\t\tRecover from second database"
+ file copy -force $testdir/$data_dir/${dbfile}.2 $testdir/$data_dir/$dbfile
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set envr [eval $env_cmd -recover_fatal]
+ error_check_good env_close [$envr close] 0
+# set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+# if { $stat == 1 } {
+# error "FAIL: Recovery error: $result."
+# return
+# }
+ puts "complete"
+# error_check_good db_recover $stat 0
+ error_check_good file_exist.2 [file exists $dir/$data_dir/$dbfile] 1
+
+ #
+ # Reopen env and db and verify 3rd database is there.
+ #
+ set env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
+ set db [eval {berkdb_open} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set ret [$db get $key]
+ error_check_good dbget [llength $ret] 1
+ set kd [lindex $ret 0]
+ error_check_good key [lindex $kd 0] $key
+	error_check_good data3 [lindex $kd 1] [pad_data $method $data3]
+
+ error_check_good dbclose [$db close] 0
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good envclose [$env close] 0
+}
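+
+# Illustrative invocation of the delete/recreate test above (a sketch only;
+# the environment command, method, and file names here are assumptions, not
+# taken from this file's callers):
+#
+#	set env_cmd "berkdb_env -create -txn -home $testdir"
+#	do_file_recover_delmk $testdir $env_cmd btree {} recd007.db {}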
+
+proc is_valid_create_loc { loc } {
+ switch $loc {
+ none -
+ preopen -
+ postopen -
+ postlogmeta -
+ postlog -
+ postsync
+ { return 1 }
+ default
+ { return 0 }
+ }
+}
+
+proc is_valid_delete_loc { loc } {
+ switch $loc {
+ none -
+ predestroy -
+ postdestroy -
+ postremcall
+ { return 1 }
+ default
+ { return 0 }
+ }
+}
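+
+# A brief sketch of how the location names validated above are used (this
+# mirrors the calls in do_file_recover_delete; the described hook behavior
+# is an assumption, not a documented contract):
+#
+#	set env [berkdb_env -create -txn -home $testdir]
+#	$env test copy postdestroy	;# request a file copy at that point
+#	$env test abort none		;# do not force an abort at any point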
+
+# Do a logical diff on the db dump files.  We expect the files either to
+# be identical or, if they differ, to differ only by a free/invalid page.
+# Return 1 if they are different, 0 if logically the same (or identical).
+#
+proc dbdump_diff { flags initfile dir dbfile } {
+ source ./include.tcl
+
+ set initdump $initfile.dump
+ set dbdump $dbfile.dump
+
+ set stat [catch {eval {exec $util_path/db_dump} $flags -f $initdump \
+ $initfile} ret]
+ error_check_good "dbdump.init $flags $initfile" $stat 0
+
+ # Do a dump without the freelist which should eliminate any
+ # recovery differences.
+ set stat [catch {eval {exec $util_path/db_dump} $flags -f $dir/$dbdump \
+ $dir/$dbfile} ret]
+ error_check_good dbdump.db $stat 0
+
+ set stat [filecmp $dir/$dbdump $initdump]
+
+ if {$stat == 0} {
+ return 0
+ }
+ puts "diff: $dbdump $initdump gives:\n$ret"
+ return 1
+}
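+
+# Example use of dbdump_diff (illustrative; the flag and file names are
+# assumptions): compare a saved copy of a database against its current
+# on-disk state and expect no logical difference.
+#
+#	set init_file $dir/recd007.db.init
+#	error_check_good no_diff \
+#	    [dbdump_diff "-dar" $init_file $dir recd007.db] 0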
+
+proc recd007_check { op sub dir dbfile subdb new env oflags data_dir} {
+ #
+ # No matter how many subdbs we have, dbtruncate will always
+ # have a file, and if we open our particular db, it should
+ # have no entries.
+ #
+ if { $sub == 0 } {
+ if { $op == "dbremove" } {
+ error_check_good $op:not-exist:$dir/$dbfile \
+ [file exists $dir/$data_dir/$dbfile] 0
+ } elseif { $op == "dbrename"} {
+ error_check_good $op:exist \
+ [file exists $dir/$data_dir/$dbfile] 0
+ error_check_good $op:exist2 \
+ [file exists $dir/$data_dir/$dbfile.new] 1
+ } else {
+ error_check_good $op:exist \
+ [file exists $dir/$data_dir/$dbfile] 1
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $db] TRUE
+ set ret [$dbc get -first]
+ error_check_good dbget1 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ return
+ } else {
+ set t1 $dir/t1
+ #
+ # If we have subdbs, check that all but the last one
+ # are there, and the last one is correctly operated on.
+ #
+ set db [berkdb_open -rdonly -env $env $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set c [eval {$db cursor}]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+ set d [$c get -last]
+ if { $op == "dbremove" } {
+ if { $sub == 1 } {
+ error_check_good subdb:rem [llength $d] 0
+ } else {
+ error_check_bad subdb:rem [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_bad subdb:rem1 $sdb $subdb
+ }
+ } elseif { $op == "dbrename"} {
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_good subdb:ren $sdb $new
+ if { $sub != 1 } {
+ set d [$c get -prev]
+ error_check_bad subdb:ren [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_good subdb:ren1 \
+ [is_substr "new" $sdb] 0
+ }
+ } else {
+ set sdb [lindex [lindex $d 0] 0]
+ set dbt [berkdb_open -rdonly -env $env $dbfile $sdb]
+ error_check_good db_open [is_valid_db $dbt] TRUE
+ set dbc [$dbt cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $dbt] TRUE
+ set ret [$dbc get -first]
+ error_check_good dbget2 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$dbt close] 0
+ if { $sub != 1 } {
+ set d [$c get -prev]
+ error_check_bad subdb:ren [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ set dbt [berkdb_open -rdonly -env $env \
+ $dbfile $sdb]
+ error_check_good db_open [is_valid_db $dbt] TRUE
+ set dbc [$db cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $db] TRUE
+ set ret [$dbc get -first]
+ error_check_bad dbget3 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$dbt close] 0
+ }
+ }
+ error_check_good dbcclose [$c close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
+
+proc copy_afterop { dir } {
+ set r [catch { set filecopy [glob $dir/*.afterop] } res]
+ if { $r == 1 } {
+ return
+ }
+ foreach f $filecopy {
+ set orig [string range $f 0 \
+ [expr [string last "." $f] - 1]]
+ catch { file rename -force $f $orig} res
+ }
+}
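+
+# For example (illustrative file name), copy_afterop renames
+# $dir/recd007.db.afterop back to $dir/recd007.db.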
diff --git a/db-4.8.30/test/recd008.tcl b/db-4.8.30/test/recd008.tcl
new file mode 100644
index 0000000..984d0d2
--- /dev/null
+++ b/db-4.8.30/test/recd008.tcl
@@ -0,0 +1,226 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd008
+# TEST Test deeply nested transactions and many-child transactions.
+proc recd008 { method {breadth 4} {depth 4} args} {
+ global kvals
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd008: $method $breadth X $depth deeply nested transactions"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set dbfile recd008.db
+
+ puts "\tRecd008.a: create database"
+ set db [eval {berkdb_open -create} $args $omethod $testdir/$dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Make sure that we have enough entries to span a couple of
+ # different pages.
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < 1000 } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $count == 500} {
+ set p1 $key
+ set kvals($p1) $str
+ }
+ set ret [$db put $key [chop_data $method $str]]
+ error_check_good put $ret 0
+
+ incr count
+ }
+ close $did
+ error_check_good db_close [$db close] 0
+
+ set txn_max [expr int([expr pow($breadth,$depth)])]
+ if { $txn_max < 20 } {
+ set txn_max 20
+ }
+
+ puts "\tRecd008.b: create environment for $txn_max transactions"
+
+ set max_locks 2500
+ set eflags "-mode 0644 -create -lock_max_locks $max_locks \
+ -lock_max_objects $max_locks -txn_max $txn_max -txn -home $testdir"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ reset_env $dbenv
+
+ set rlist {
+ { {recd008_parent abort ENV DB $method $p1 TXNID 1 1 $breadth $depth}
+ "Recd008.c: child abort parent" }
+ { {recd008_parent commit ENV DB $method $p1 TXNID 1 1 $breadth $depth}
+ "Recd008.d: child commit parent" }
+ }
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ op_recover abort $testdir $env_cmd $dbfile $cmd $msg $args
+ eval recd008_setkval $dbfile $p1 $args
+ op_recover commit $testdir $env_cmd $dbfile $cmd $msg $args
+ eval recd008_setkval $dbfile $p1 $args
+ }
+
+ puts "\tRecd008.e: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+proc recd008_setkval { dbfile p1 args} {
+ global kvals
+ source ./include.tcl
+
+ set db [eval {berkdb_open} $args $testdir/$dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get $p1]
+ error_check_good dbclose [$db close] 0
+ set kvals($p1) [lindex [lindex $ret 0] 1]
+}
+
+# This is a lot like the op_recover procedure. We cannot use that
+# because it was not meant to be called recursively. This proc
+# knows about depth/breadth and file naming so that recursive calls
+# don't overwrite various initial and afterop files, etc.
+#
+# The basic flow of this is:
+# (Initial file)
+# Parent begin transaction (in op_recover)
+# Parent starts children
+# Recursively call recd008_recover
+# (children modify p1)
+# Parent modifies p1
+# (Afterop file)
+# Parent commit/abort (in op_recover)
+# (Final file)
+# Recovery test (in op_recover)
+proc recd008_parent { op env db method p1key parent b0 d0 breadth depth } {
+ global kvals
+ source ./include.tcl
+
+ #
+ # Save copy of original data
+ # Acquire lock on data
+ #
+ set olddata [pad_data $method $kvals($p1key)]
+ set ret [$db get -rmw -txn $parent $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ error_check_good get_parent_RMW $Dret $olddata
+
+ #
+ # Parent spawns off children
+ #
+ set ret [recd008_txn $op $env $db $method $p1key $parent \
+ $b0 $d0 $breadth $depth]
+
+ puts "Child runs complete. Parent modifies data."
+
+ #
+ # Parent modifies p1
+ #
+ set newdata $olddata.parent
+ set ret [$db put -txn $parent $p1key [chop_data $method $newdata]]
+ error_check_good db_put $ret 0
+
+ #
+ # Save value in kvals for later comparison
+ #
+ switch $op {
+ "commit" {
+ set kvals($p1key) $newdata
+ }
+ "abort" {
+ set kvals($p1key) $olddata
+ }
+ }
+ return 0
+}
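+
+# A sketch of how recd008_parent ends up being called (assumption: the
+# op_recover helper substitutes the ENV, DB, and TXNID placeholders from
+# the rlist above with the open environment, database handle, and parent
+# transaction before evaluating the command):
+#
+#	recd008_parent abort $env $db btree $p1 $parent_txn 1 1 4 4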
+
+proc recd008_txn { op env db method p1key parent b0 d0 breadth depth } {
+ global log_log_record_types
+ global kvals
+ source ./include.tcl
+
+ for {set d 1} {$d < $d0} {incr d} {
+ puts -nonewline "\t"
+ }
+ puts "Recd008_txn: $op parent:$parent $breadth $depth ($b0 $d0)"
+
+	# Loop over the breadth at this level: begin a child transaction,
+	# recurse to deeper levels, modify p1, then commit or abort.
+ for {set b $b0} {$b <= $breadth} {incr b} {
+ #
+ # Begin child transaction
+ #
+ set t [$env txn -parent $parent]
+ error_check_bad txn_begin $t NULL
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+ set startd [expr $d0 + 1]
+ set child $b:$startd:$t
+ set olddata [pad_data $method $kvals($p1key)]
+ set newdata $olddata.$child
+ set ret [$db get -rmw -txn $t $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ error_check_good get_parent_RMW $Dret $olddata
+
+ #
+ # Recursively call to set up nested transactions/children
+ #
+ for {set d $startd} {$d <= $depth} {incr d} {
+ set ret [recd008_txn commit $env $db $method $p1key $t \
+ $b $d $breadth $depth]
+ set ret [recd008_txn abort $env $db $method $p1key $t \
+ $b $d $breadth $depth]
+ }
+ #
+ # Modifies p1.
+ #
+ set ret [$db put -txn $t $p1key [chop_data $method $newdata]]
+ error_check_good db_put $ret 0
+
+ #
+ # Commit or abort
+ #
+ for {set d 1} {$d < $startd} {incr d} {
+ puts -nonewline "\t"
+ }
+ puts "Executing txn_$op:$t"
+ error_check_good txn_$op:$t [$t $op] 0
+ for {set d 1} {$d < $startd} {incr d} {
+ puts -nonewline "\t"
+ }
+ set ret [$db get -rmw -txn $parent $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ set newdata [pad_data $method $newdata]
+ switch $op {
+ "commit" {
+ puts "Command executed and committed."
+ error_check_good get_parent_RMW $Dret $newdata
+ set kvals($p1key) $newdata
+ }
+ "abort" {
+ puts "Command executed and aborted."
+ error_check_good get_parent_RMW $Dret $olddata
+ set kvals($p1key) $olddata
+ }
+ }
+ }
+ return 0
+}
diff --git a/db-4.8.30/test/recd009.tcl b/db-4.8.30/test/recd009.tcl
new file mode 100644
index 0000000..a3f7899
--- /dev/null
+++ b/db-4.8.30/test/recd009.tcl
@@ -0,0 +1,179 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd009
+# TEST Verify record numbering across split/reverse splits and recovery.
+proc recd009 { method {select 0} args} {
+ global fixed_len
+ source ./include.tcl
+
+ if { [is_rbtree $method] != 1 && [is_rrecno $method] != 1} {
+ puts "Recd009 skipping for method $method."
+ return
+ }
+
+ set opts [convert_args $method $args]
+ set method [convert_method $method]
+
+ puts "\tRecd009: Test record numbers across splits and recovery"
+
+ set testfile recd009.db
+ env_cleanup $testdir
+ set mkeys 1000
+ set nkeys 5
+ set data "data"
+
+ puts "\tRecd009.a: Create $method environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-env $dbenv -pagesize 8192 -create -mode 0644 $opts $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Fill page with small key/data pairs. Keep at leaf.
+ puts "\tRecd009.b: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { [is_recno $method] == 1 } {
+ set key $i
+ } else {
+ set key key000$i
+ }
+ set ret [$db put $key $data$i]
+ error_check_good dbput $ret 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ set newnkeys [expr $nkeys + 1]
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {recd009_split DB TXNID 1 $method $newnkeys $mkeys}
+ "Recd009.c: split"}
+ { {recd009_split DB TXNID 0 $method $newnkeys $mkeys}
+ "Recd009.d: reverse split"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ set reverse [string first "reverse" $msg]
+ if { $reverse == -1 } {
+ set abortkeys $nkeys
+ set commitkeys $mkeys
+ set abortpg 0
+ set commitpg 1
+ } else {
+ set abortkeys $mkeys
+ set commitkeys $nkeys
+ set abortpg 1
+ set commitpg 0
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg $args
+ recd009_recnocheck $testdir $testfile $opts $abortkeys $abortpg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg $args
+ recd009_recnocheck $testdir $testfile $opts \
+ $commitkeys $commitpg
+ }
+ puts "\tRecd009.e: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+#
+# This procedure verifies that the database has only numkeys number
+# of keys and that they are in order.
+#
+proc recd009_recnocheck { tdir testfile opts numkeys numpg} {
+ source ./include.tcl
+
+ set db [eval {berkdb_open} $opts $tdir/$testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRecd009_recnocheck: Verify page count of $numpg on split."
+ set stat [$db stat]
+ error_check_bad stat:check-split [is_substr $stat \
+ "{{Internal pages} 0}"] $numpg
+
+ set type [$db get_type]
+ set dbc [$db cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+ set i 1
+ puts "\tRecd009_recnocheck: Checking $numkeys record numbers."
+ for {set d [$dbc get -first]} { [llength $d] != 0 } {
+ set d [$dbc get -next]} {
+ if { [is_btree $type] } {
+ set thisi [$dbc get -get_recno]
+ } else {
+ set thisi [lindex [lindex $d 0] 0]
+ }
+ error_check_good recno_check $i $thisi
+ error_check_good record_count [expr $i <= $numkeys] 1
+ incr i
+ }
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
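+
+# Example check (illustrative values): after a committed split we expect
+# $mkeys records and at least one internal page, as driven by the rlist
+# loop above.
+#
+#	recd009_recnocheck $testdir recd009.db $opts $mkeys 1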
+
+proc recd009_split { db txn split method nkeys mkeys } {
+ global errorCode
+ source ./include.tcl
+
+ set data "data"
+
+ set isrecno [is_recno $method]
+ # if mkeys is above 1000, need to adjust below for lexical order
+ if { $split == 1 } {
+ puts "\tRecd009_split: Add $mkeys pairs to force split."
+ for {set i $nkeys} { $i <= $mkeys } { incr i } {
+ if { $isrecno == 1 } {
+ set key $i
+ } else {
+ if { $i >= 100 } {
+ set key key0$i
+ } elseif { $i >= 10 } {
+ set key key00$i
+ } else {
+ set key key000$i
+ }
+ }
+ set ret [$db put -txn $txn $key $data$i]
+ error_check_good dbput:more $ret 0
+ }
+ } else {
+ puts "\tRecd009_split: Delete added keys to force reverse split."
+ # Since rrecno renumbers, we delete downward.
+ for {set i $mkeys} { $i >= $nkeys } { set i [expr $i - 1] } {
+ if { $isrecno == 1 } {
+ set key $i
+ } else {
+ if { $i >= 100 } {
+ set key key0$i
+ } elseif { $i >= 10 } {
+ set key key00$i
+ } else {
+ set key key000$i
+ }
+ }
+ error_check_good db_del:$i [$db del -txn $txn $key] 0
+ }
+ }
+ return 0
+}
diff --git a/db-4.8.30/test/recd010.tcl b/db-4.8.30/test/recd010.tcl
new file mode 100644
index 0000000..3e573a6
--- /dev/null
+++ b/db-4.8.30/test/recd010.tcl
@@ -0,0 +1,256 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd010
+# TEST Test stability of btree duplicates across btree off-page dup splits
+# TEST and reverse splits and across recovery.
+proc recd010 { method {select 0} args} {
+ if { [is_btree $method] != 1 } {
+ puts "Recd010 skipping for method $method."
+ return
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd010: skipping for specific pagesizes"
+ return
+ }
+ set largs $args
+ append largs " -dup "
+ recd010_main $method $select $largs
+ append largs " -dupsort "
+ recd010_main $method $select $largs
+}
+
+proc recd010_main { method select largs } {
+ global fixed_len
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+
+ set opts [convert_args $method $largs]
+ set method [convert_method $method]
+
+ puts "Recd010 ($opts): Test duplicates across splits and recovery"
+
+ set testfile recd010.db
+ env_cleanup $testdir
+ #
+ # Set pagesize small to generate lots of off-page dups
+ #
+ set page 512
+ set mkeys 1000
+ set firstkeys 5
+ set data "data"
+ set key "recd010_key"
+
+ puts "\tRecd010.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-env $dbenv -create -mode 0644 $opts $method"
+ set db [eval {berkdb_open} -pagesize $page $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Fill page with small key/data pairs. Keep at leaf.
+ puts "\tRecd010.b: Fill page with $firstkeys small dups."
+ for { set i 1 } { $i <= $firstkeys } { incr i } {
+ set ret [$db put $key $data$i]
+ error_check_good dbput $ret 0
+ }
+ set kvals 1
+ set kvals_dups $firstkeys
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # List of recovery tests: {CMD MSG} pairs.
+ if { $mkeys < 100 } {
+ puts "Recd010 mkeys of $mkeys too small"
+ return
+ }
+ set rlist {
+ { {recd010_split DB TXNID 1 2 $mkeys}
+ "Recd010.c: btree split 2 large dups"}
+ { {recd010_split DB TXNID 0 2 $mkeys}
+ "Recd010.d: btree reverse split 2 large dups"}
+ { {recd010_split DB TXNID 1 10 $mkeys}
+ "Recd010.e: btree split 10 dups"}
+ { {recd010_split DB TXNID 0 10 $mkeys}
+ "Recd010.f: btree reverse split 10 dups"}
+ { {recd010_split DB TXNID 1 100 $mkeys}
+ "Recd010.g: btree split 100 dups"}
+ { {recd010_split DB TXNID 0 100 $mkeys}
+ "Recd010.h: btree reverse split 100 dups"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ set reverse [string first "reverse" $msg]
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg $largs
+ recd010_check $testdir $testfile $opts abort $reverse $firstkeys
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg $largs
+ recd010_check $testdir $testfile $opts commit $reverse $firstkeys
+ }
+ puts "\tRecd010.i: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+#
+# This procedure verifies that the database has only numkeys number
+# of keys and that they are in order.
+#
+proc recd010_check { tdir testfile opts op reverse origdups } {
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+ set db [eval {berkdb_open} $opts $tdir/$testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set data "data"
+
+ if { $reverse == -1 } {
+ puts "\tRecd010_check: Verify split after $op"
+ } else {
+ puts "\tRecd010_check: Verify reverse split after $op"
+ }
+
+ set stat [$db stat]
+ if { [expr ([string compare $op "abort"] == 0 && $reverse == -1) || \
+ ([string compare $op "commit"] == 0 && $reverse != -1)]} {
+ set numkeys 0
+ set allkeys [expr $numkeys + 1]
+ set numdups $origdups
+ #
+ # If we abort the adding of dups, or commit
+ # the removal of dups, either way check that
+ # we are back at the beginning. Check that:
+ # - We have 0 internal pages.
+ # - We have only 1 key (the original we primed the db
+ # with at the beginning of the test).
+ # - We have only the original number of dups we primed
+ # the db with at the beginning of the test.
+ #
+ error_check_good stat:orig0 [is_substr $stat \
+ "{{Internal pages} 0}"] 1
+ error_check_good stat:orig1 [is_substr $stat \
+ "{{Number of keys} 1}"] 1
+ error_check_good stat:orig2 [is_substr $stat \
+ "{{Number of records} $origdups}"] 1
+ } else {
+ set numkeys $kvals
+ set allkeys [expr $numkeys + 1]
+ set numdups $kvals_dups
+ #
+ # If we abort the removal of dups, or commit the
+ # addition of dups, check that:
+ # - We have > 0 internal pages.
+ # - We have the number of keys.
+ #
+ error_check_bad stat:new0 [is_substr $stat \
+ "{{Internal pages} 0}"] 1
+ error_check_good stat:new1 [is_substr $stat \
+ "{{Number of keys} $allkeys}"] 1
+ }
+
+ set dbc [$db cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+ puts "\tRecd010_check: Checking key and duplicate values"
+ set key "recd010_key"
+ #
+ # Check dups are there as they should be.
+ #
+ for {set ki 0} {$ki < $numkeys} {incr ki} {
+ set datacnt 0
+ for {set d [$dbc get -set $key$ki]} { [llength $d] != 0 } {
+ set d [$dbc get -nextdup]} {
+ set thisdata [lindex [lindex $d 0] 1]
+ if { $datacnt < 10 } {
+ set pdata $data.$ki.00$datacnt
+ } elseif { $datacnt < 100 } {
+ set pdata $data.$ki.0$datacnt
+ } else {
+ set pdata $data.$ki.$datacnt
+ }
+ error_check_good dup_check $thisdata $pdata
+ incr datacnt
+ }
+ error_check_good dup_count $datacnt $numdups
+ }
+ #
+ # Check that the number of expected keys (allkeys) are
+ # all of the ones that exist in the database.
+ #
+ set dupkeys 0
+ set lastkey ""
+ for {set d [$dbc get -first]} { [llength $d] != 0 } {
+ set d [$dbc get -next]} {
+ set thiskey [lindex [lindex $d 0] 0]
+ if { [string compare $lastkey $thiskey] != 0 } {
+ incr dupkeys
+ }
+ set lastkey $thiskey
+ }
+ error_check_good key_check $allkeys $dupkeys
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
+
+proc recd010_split { db txn split nkeys mkeys } {
+ global errorCode
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+ set data "data"
+ set key "recd010_key"
+
+ set numdups [expr $mkeys / $nkeys]
+
+ set kvals $nkeys
+ set kvals_dups $numdups
+ if { $split == 1 } {
+ puts \
+"\tRecd010_split: Add $nkeys keys, with $numdups duplicates each to force split."
+ for {set k 0} { $k < $nkeys } { incr k } {
+ for {set i 0} { $i < $numdups } { incr i } {
+ if { $i < 10 } {
+ set pdata $data.$k.00$i
+ } elseif { $i < 100 } {
+ set pdata $data.$k.0$i
+ } else {
+ set pdata $data.$k.$i
+ }
+ set ret [$db put -txn $txn $key$k $pdata]
+ error_check_good dbput:more $ret 0
+ }
+ }
+ } else {
+ puts \
+"\tRecd010_split: Delete $nkeys keys to force reverse split."
+ for {set k 0} { $k < $nkeys } { incr k } {
+ error_check_good db_del:$k [$db del -txn $txn $key$k] 0
+ }
+ }
+ return 0
+}
diff --git a/db-4.8.30/test/recd011.tcl b/db-4.8.30/test/recd011.tcl
new file mode 100644
index 0000000..b30f3b5
--- /dev/null
+++ b/db-4.8.30/test/recd011.tcl
@@ -0,0 +1,135 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd011
+# TEST Verify that recovery to a specific timestamp works.
+proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
+ source ./include.tcl
+ global rand_init
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum "011"
+
+ puts "Recd$tnum ($method $args): Test recovery to a specific timestamp."
+
+ set testfile recd$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ set bigkey 1001
+ } else {
+ set key KEY
+ set bigkey BIGKEY
+ }
+
+ puts "\tRecd$tnum.a: Create environment and database."
+ set bufsize [expr 8 * 1024]
+ set maxsize [expr 8 * $bufsize]
+ set flags "-create -txn -home $testdir -log_buffer $bufsize \
+ -log_max $maxsize"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Main loop: every second or so, increment the db in a txn.
+ puts "\t\tInitial Checkpoint"
+ error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
+
+ puts "\tRecd$tnum.b ($niter iterations):\
+ Transaction-protected increment loop."
+ for { set i 0 } { $i <= $niter } { incr i } {
+ set str [random_data 4096 0 NOTHING]
+ set data $i
+ set bigdata $i$str
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good db_put \
+ [$db put -txn $txn $bigkey [chop_data $method $bigdata]] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ # We need to sleep before taking the timestamp to guarantee
+ # that the timestamp is *after* this transaction commits.
+ # Since the resolution of the system call used by Berkeley DB
+ # is less than a second, rounding to the nearest second can
+ # otherwise cause off-by-one errors in the test.
+ tclsleep $sleep_time
+
+ set timeof($i) [timestamp -r]
+
+ # If an appropriate period has elapsed, checkpoint.
+ if { $i % $ckpt_freq == $ckpt_freq - 1 } {
+ puts "\t\tIteration $i: Checkpointing."
+ error_check_good ckpt($i) [$dbenv txn_checkpoint] 0
+ }
+
+ # Sleep again to ensure that the next operation definitely
+ # occurs after the timestamp.
+ tclsleep $sleep_time
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # Now, loop through and recover to each timestamp, verifying the
+ # expected increment.
+ puts "\tRecd$tnum.c: Recover to each timestamp and check."
+ for { set i $niter } { $i >= 0 } { incr i -1 } {
+
+ # Run db_recover.
+ set t [clock format $timeof($i) -format "%y%m%d%H%M.%S"]
+ # puts $t
+ berkdb debug_check
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_good db_recover($i,$t,$r) $ret 0
+
+ # Now open the db and check the timestamp.
+ set db [eval {berkdb_open} $args $testdir/$testfile]
+ error_check_good db_open($i) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ set datum [lindex [lindex $dbt 0] 1]
+ error_check_good timestamp_recover $datum [pad_data $method $i]
+
+ error_check_good db_close [$db close] 0
+ }
+
+ # Finally, recover to a time well before the first timestamp
+ # and well after the last timestamp. The latter should
+ # be just like the timestamp of the last test performed;
+ # the former should fail.
+ puts "\tRecd$tnum.d: Recover to before the first timestamp."
+ set t [clock format [expr $timeof(0) - 1000] -format "%y%m%d%H%M.%S"]
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_bad db_recover(before,$t) $ret 0
+
+ puts "\tRecd$tnum.e: Recover to after the last timestamp."
+ set t [clock format \
+ [expr $timeof($niter) + 1000] -format "%y%m%d%H%M.%S"]
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_good db_recover(after,$t) $ret 0
+
+ # Now open the db and check the timestamp.
+ set db [eval {berkdb_open} $args $testdir/$testfile]
+ error_check_good db_open(after) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ set datum2 [lindex [lindex $dbt 0] 1]
+
+ error_check_good timestamp_recover $datum2 $datum
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/recd012.tcl b/db-4.8.30/test/recd012.tcl
new file mode 100644
index 0000000..b9ce791
--- /dev/null
+++ b/db-4.8.30/test/recd012.tcl
@@ -0,0 +1,433 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd012
+# TEST Test of log file ID management. [#2288]
+# TEST Test recovery handling of file opens and closes.
+proc recd012 { method {start 0} \
+ {niter 49} {noutiter 25} {niniter 100} {ndbs 5} args } {
+ source ./include.tcl
+
+ set tnum "012"
+ set pagesize 512
+
+ if { $is_qnx_test } {
+ set niter 40
+ }
+
+ puts "Recd$tnum $method ($args): Test recovery file management."
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd012: skipping for specific pagesizes"
+ return
+ }
+
+ for { set i $start } { $i <= $niter } { incr i } {
+ env_cleanup $testdir
+
+ # For repeatability, we pass in the iteration number
+ # as a parameter and use that in recd012_body to seed
+ # the random number generator to randomize our operations.
+ # This lets us re-run a potentially failing iteration
+ # without having to start from the beginning and work
+ # our way to it.
+ #
+ # The number of databases ranges from 4 to 8 and is
+		# a function of the iteration number $i:
+ # set ndbs [expr ($i % 5) + 4]
+
+ recd012_body \
+ $method $ndbs $i $noutiter $niniter $pagesize $tnum $args
+ }
+}
+
+proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} } {
+ global alphabet rand_init fixed_len recd012_ofkey recd012_ofckptkey
+ source ./include.tcl
+
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+
+ puts "\tRecd$tnum $method ($largs): Iteration $iter"
+ puts "\t\tRecd$tnum.a: Create environment and $ndbs databases."
+
+ # We run out of lockers during some of the recovery runs, so
+ # we need to make sure that we specify a DB_CONFIG that will
+ # give us enough lockers.
+ set f [open $testdir/DB_CONFIG w]
+ puts $f "set_lk_max_lockers 5000"
+ close $f
+
+ set flags "-create -txn -home $testdir"
+ set env_cmd "berkdb_env $flags"
+ error_check_good env_remove [berkdb envremove -home $testdir] 0
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # Initialize random number generator based on $iter.
+ berkdb srand [expr $iter + $rand_init]
+
+ # Initialize database that keeps track of number of open files (so
+ # we don't run out of descriptors).
+ set ofname of.db
+ set txn [$dbenv txn]
+ error_check_good open_txn_begin [is_valid_txn $txn $dbenv] TRUE
+ set ofdb [berkdb_open -env $dbenv -txn $txn\
+ -create -dup -mode 0644 -btree -pagesize 512 $ofname]
+ error_check_good of_open [is_valid_db $ofdb] TRUE
+ error_check_good open_txn_commit [$txn commit] 0
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ error_check_good of_put [$ofdb put -txn $oftxn $recd012_ofkey 1] 0
+ error_check_good of_put2 [$ofdb put -txn $oftxn $recd012_ofckptkey 0] 0
+ error_check_good of_put3 [$ofdb put -txn $oftxn $recd012_ofckptkey 0] 0
+ error_check_good of_txn_commit [$oftxn commit] 0
+ error_check_good of_close [$ofdb close] 0
+
+ # Create ndbs databases to work in, and a file listing db names to
+ # pick from.
+ set f [open $testdir/dblist w]
+
+ set oflags "-auto_commit -env $dbenv \
+ -create -mode 0644 -pagesize $psz $largs $omethod"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ # 50-50 chance of being a subdb, unless we're a queue or partitioned.
+ if { [berkdb random_int 0 1] || \
+ [is_queue $method] || [is_partitioned $largs] } {
+ # not a subdb
+ set dbname recd$tnum-$i.db
+ } else {
+ # subdb
+ set dbname "recd$tnum-subdb.db s$i"
+ }
+ puts $f $dbname
+ set db [eval {berkdb_open} $oflags $dbname]
+ error_check_good db($i) [is_valid_db $db] TRUE
+ error_check_good db($i)_close [$db close] 0
+ }
+ close $f
+ error_check_good env_close [$dbenv close] 0
+
+ # Now we get to the meat of things. Our goal is to do some number
+ # of opens, closes, updates, and shutdowns (simulated here by a
+ # close of all open handles and a close/reopen of the environment,
+ # with or without an envremove), matching the regular expression
+ #
+ # ((O[OUC]+S)+R+V)
+ #
+ # We'll repeat the inner + a random number up to $niniter times,
+ # and the outer + a random number up to $noutiter times.
+ #
+ # In order to simulate shutdowns, we'll perform the opens, closes,
+ # and updates in a separate process, which we'll exit without closing
+ # all handles properly. The environment will be left lying around
+ # before we run recovery 50% of the time.
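+	# For example, one outer iteration might perform the sequence
+	# O U C S  O O U S  R R  V: two open/update/close runs each ending
+	# in a simulated shutdown, two recoveries, then verification.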
+ set out [berkdb random_int 1 $noutiter]
+ puts \
+ "\t\tRecd$tnum.b: Performing $out recoveries of up to $niniter ops."
+ for { set i 0 } { $i < $out } { incr i } {
+ set child [open "|$tclsh_path" w]
+
+ # For performance, don't source everything,
+ # just what we'll need.
+ puts $child "load $tcllib"
+ puts $child "set fixed_len $fixed_len"
+ puts $child "source $src_root/test/testutils.tcl"
+ puts $child "source $src_root/test/recd$tnum.tcl"
+
+ set rnd [expr $iter * 10000 + $i * 100 + $rand_init]
+
+ # Go.
+ berkdb debug_check
+ puts $child "recd012_dochild {$env_cmd} $rnd $i $niniter\
+ $ndbs $tnum $method $ofname $largs"
+ close $child
+
+ # Run recovery 0-3 times.
+ set nrecs [berkdb random_int 0 3]
+ for { set j 0 } { $j < $nrecs } { incr j } {
+ berkdb debug_check
+ set ret [catch {exec $util_path/db_recover \
+ -h $testdir} res]
+ if { $ret != 0 } {
+ puts "FAIL: db_recover returned with nonzero\
+ exit status, output as follows:"
+ file mkdir /tmp/12out
+ set fd [open /tmp/12out/[pid] w]
+ puts $fd $res
+ close $fd
+ }
+ error_check_good recover($j) $ret 0
+ }
+ }
+
+ # Run recovery one final time; it doesn't make sense to
+ # check integrity if we do not.
+ set ret [catch {exec $util_path/db_recover -h $testdir} res]
+ if { $ret != 0 } {
+ puts "FAIL: db_recover returned with nonzero\
+ exit status, output as follows:"
+ puts $res
+ }
+
+ # Make sure each datum is the correct filename.
+ puts "\t\tRecd$tnum.c: Checking data integrity."
+ set dbenv [berkdb_env -create -private -home $testdir]
+ error_check_good env_open_integrity [is_valid_env $dbenv] TRUE
+ set f [open $testdir/dblist r]
+ set i 0
+ while { [gets $f dbinfo] > 0 } {
+ set db [eval berkdb_open -env $dbenv $largs $dbinfo]
+ error_check_good dbopen($dbinfo) [is_valid_db $db] TRUE
+
+ set dbc [$db cursor]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -next] } {
+ error_check_good integrity [lindex [lindex $dbt 0] 1] \
+ [pad_data $method $dbinfo]
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ close $f
+ error_check_good env_close_integrity [$dbenv close] 0
+
+ # Verify
+ error_check_good verify \
+ [verify_dir $testdir "\t\tRecd$tnum.d: " 0 0 1] 0
+}
+
+proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
+ ofname args } {
+ global recd012_ofkey
+ source ./include.tcl
+ if { [is_record_based $method] } {
+ set keybase ""
+ } else {
+ set keybase .[repeat abcdefghijklmnopqrstuvwxyz 4]
+ }
+
+ # Initialize our random number generator, repeatably based on an arg.
+ berkdb srand $rnd
+
+ # Open our env.
+ set dbenv [eval $env_cmd]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Find out how many databases appear to be open in the log--we
+ # don't want recovery to run out of filehandles.
+ set txn [$dbenv txn]
+ error_check_good child_txn_begin [is_valid_txn $txn $dbenv] TRUE
+ set ofdb [berkdb_open -env $dbenv -txn $txn $ofname]
+ error_check_good child_txn_commit [$txn commit] 0
+
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ set dbt [$ofdb get -txn $oftxn $recd012_ofkey]
+ error_check_good of_get [lindex [lindex $dbt 0] 0] $recd012_ofkey
+ set nopenfiles [lindex [lindex $dbt 0] 1]
+
+ error_check_good of_commit [$oftxn commit] 0
+
+ # Read our dbnames
+ set f [open $testdir/dblist r]
+ set i 0
+ while { [gets $f dbname($i)] > 0 } {
+ incr i
+ }
+ close $f
+
+ # We now have $ndbs extant databases.
+	# They are opened as needed in the loop below.
+ set opendbs {}
+ set oflags "-env $dbenv $args"
+
+ # Start a transaction, just to get us started.
+ set curtxn [$dbenv txn]
+ error_check_good txn [is_valid_txn $curtxn $dbenv] TRUE
+
+ # Inner loop. Do $in iterations of a random open, close, or
+ # update, where $in is between 1 and $niniter.
+ set in [berkdb random_int 1 $niniter]
+ for { set j 0 } { $j < $in } { incr j } {
+ set op [berkdb random_int 0 2]
+ switch $op {
+ 0 {
+ # Open.
+ recd012_open
+ }
+ 1 {
+ # Update. Put random-number$keybase as key,
+ # filename as data, into random database.
+ set num_open [llength $opendbs]
+ if { $num_open == 0 } {
+ # If none are open, do an open first.
+ recd012_open
+ set num_open [llength $opendbs]
+ }
+ set n [berkdb random_int 0 [expr $num_open - 1]]
+ set pair [lindex $opendbs $n]
+ set udb [lindex $pair 0]
+ set uname [lindex $pair 1]
+
+ set key [berkdb random_int 1000 1999]$keybase
+ set data [chop_data $method $uname]
+ error_check_good put($uname,$udb,$key,$data) \
+ [$udb put -txn $curtxn $key $data] 0
+
+				# One time in four, commit the transaction
+				# (currently disabled by the trailing '&& 0').
+ if { [berkdb random_int 0 3] == 0 && 0 } {
+ error_check_good txn_recommit \
+ [$curtxn commit] 0
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen \
+ [is_valid_txn $curtxn $dbenv] TRUE
+ }
+ }
+ 2 {
+ # Close.
+ if { [llength $opendbs] == 0 } {
+ # If none are open, open instead of closing.
+ recd012_open
+ continue
+ }
+
+ # Commit curtxn first, lest we self-deadlock.
+ error_check_good txn_recommit [$curtxn commit] 0
+
+ # Do it.
+ set which [berkdb random_int 0 \
+ [expr [llength $opendbs] - 1]]
+
+ set db [lindex [lindex $opendbs $which] 0]
+ error_check_good db_choice [is_valid_db $db] TRUE
+ global errorCode errorInfo
+
+ error_check_good db_close \
+ [[lindex [lindex $opendbs $which] 0] close] 0
+
+ set opendbs [lreplace $opendbs $which $which]
+ incr nopenfiles -1
+
+ # Reopen txn.
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen \
+ [is_valid_txn $curtxn $dbenv] TRUE
+ }
+ }
+
+ # One time in two hundred, checkpoint.
+ if { [berkdb random_int 0 199] == 0 } {
+ puts "\t\t\tRecd$tnum:\
+ Random checkpoint after operation $outiter.$j."
+ error_check_good txn_ckpt \
+ [$dbenv txn_checkpoint] 0
+ set nopenfiles \
+ [recd012_nopenfiles_ckpt $dbenv $ofdb $nopenfiles]
+ }
+ }
+
+ # We have to commit curtxn. It'd be kind of nice not to, but
+ # if we start in again without running recovery, we may block
+ # ourselves.
+ error_check_good curtxn_commit [$curtxn commit] 0
+
+ # Put back the new number of open files.
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ error_check_good of_del [$ofdb del -txn $oftxn $recd012_ofkey] 0
+ error_check_good of_put \
+ [$ofdb put -txn $oftxn $recd012_ofkey $nopenfiles] 0
+ error_check_good of_commit [$oftxn commit] 0
+ error_check_good ofdb_close [$ofdb close] 0
+}
+
+proc recd012_open { } {
+ # This is basically an inline and has to modify curtxn,
+ # so use upvars.
+ upvar curtxn curtxn
+ upvar ndbs ndbs
+ upvar dbname dbname
+ upvar dbenv dbenv
+ upvar oflags oflags
+ upvar opendbs opendbs
+ upvar nopenfiles nopenfiles
+
+ # Return without an open if we've already opened too many files--
+ # we don't want to make recovery run out of filehandles.
+ if { $nopenfiles > 30 } {
+ #puts "skipping--too many open files"
+ return -code break
+ }
+
+ # Commit curtxn first, lest we self-deadlock.
+ error_check_good txn_recommit \
+ [$curtxn commit] 0
+
+ # Do it.
+ set which [berkdb random_int 0 [expr $ndbs - 1]]
+
+ set db [eval berkdb_open -auto_commit $oflags $dbname($which)]
+
+ lappend opendbs [list $db $dbname($which)]
+
+ # Reopen txn.
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen [is_valid_txn $curtxn $dbenv] TRUE
+
+ incr nopenfiles
+}
+
+# Update the database containing the number of files that db_recover has
+# to contend with--we want to avoid letting it run out of file descriptors.
+# We do this by keeping track of the number of unclosed opens since the
+# checkpoint before last.
+# $recd012_ofkey stores this current value; the two dups stored under
+# $recd012_ofckptkey hold the number of opens made during each of the last
+# two inter-checkpoint intervals.
+# Thus, if the current value is 17 when we do a checkpoint, and the
+# stored values are 3 and 8, the new current value (which we return)
+# is 14, and the new stored values are 8 and 6.
+proc recd012_nopenfiles_ckpt { env db nopenfiles } {
+ global recd012_ofckptkey
+ set txn [$env txn]
+ error_check_good nopenfiles_ckpt_txn [is_valid_txn $txn $env] TRUE
+
+ set dbc [$db cursor -txn $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # Get the first ckpt value and delete it.
+ set dbt [$dbc get -set $recd012_ofckptkey]
+ error_check_good set [llength $dbt] 1
+
+ set discard [lindex [lindex $dbt 0] 1]
+ error_check_good del [$dbc del] 0
+
+ set nopenfiles [expr $nopenfiles - $discard]
+
+ # Get the next ckpt value
+ set dbt [$dbc get -nextdup]
+ error_check_good set2 [llength $dbt] 1
+
+ # Calculate how many opens we've had since this checkpoint before last.
+ set onlast [lindex [lindex $dbt 0] 1]
+ set sincelast [expr $nopenfiles - $onlast]
+
+ # Put this new number at the end of the dup set.
+ error_check_good put [$dbc put -keylast $recd012_ofckptkey $sincelast] 0
+
+ # We should never deadlock since we're the only one in this db.
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ return $nopenfiles
+}
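+
+# Tracing the example from the comment above through the code: with
+# nopenfiles = 17 and ckpt dups {3 8}, the first dup (3) is deleted and
+# subtracted, leaving 14; sincelast = 14 - 8 = 6 is appended, so the dups
+# become {8 6} and 14 is returned.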
+
+# globals -- it's not worth passing these around, as they're constants
+set recd012_ofkey OPENFILES
+set recd012_ofckptkey CKPTS
diff --git a/db-4.8.30/test/recd013.tcl b/db-4.8.30/test/recd013.tcl
new file mode 100644
index 0000000..7d40cd8
--- /dev/null
+++ b/db-4.8.30/test/recd013.tcl
@@ -0,0 +1,291 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd013
+# TEST Test of cursor adjustment on child transaction aborts. [#2373]
+#
+# XXX
+# Other tests that cover more specific variants of the same issue
+# are in the access method tests for now. This is probably wrong; we
+# put this one here because they're closely based on and intertwined
+# with other, non-transactional cursor stability tests that are among
+# the access method tests, and because we need at least one test to
+# fit under recd and keep logtrack from complaining. We'll sort out the mess
+# later; the important thing, for now, is that everything that needs testing
+# gets tested. (This really shouldn't be under recd at all, since it doesn't
+# run recovery!)
+proc recd013 { method { nitems 100 } args } {
+ source ./include.tcl
+ global alphabet log_log_record_types
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum "013"
+ set pgsz 512
+
+ if { [is_partition_callback $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+
+ puts "Recd$tnum $method ($args): Test of aborted cursor adjustments."
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd013: skipping for specific pagesizes"
+ return
+ }
+
+ set testfile recd$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set keybase ""
+ } else {
+ set keybase "key"
+ }
+
+ puts "\tRecd$tnum.a:\
+ Create environment, database, and parent transaction."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set oflags \
+ "-auto_commit -env $env -create -mode 0644 -pagesize $pgsz $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Create a database containing $nitems items, numbered with odds.
+ # We'll then put the even numbers during the body of the test.
+ set txn [$env txn]
+ error_check_good init_txn [is_valid_txn $txn $env] TRUE
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+
+ # First, try to put the item in a child transaction,
+		# then abort and verify all the cursors we've created up to
+		# this point.
+ set ctxn [$env txn -parent $txn]
+ error_check_good child_txn($i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good fake_put($i) [$db put -txn $ctxn $key $data] 0
+ error_check_good ctxn_abort($i) [$ctxn abort] 0
+ for { set j 1 } { $j < $i } { incr j 2 } {
+ error_check_good dbc_get($j):1 [$dbc($j) get -current] \
+ [list [list $keybase$j \
+ [pad_data $method $j$alphabet]]]
+ }
+
+ # Then put for real.
+ error_check_good init_put($i) [$db put -txn $txn $key $data] 0
+
+ # Set a cursor of the parent txn to each item.
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good dbc_getset($i) \
+ [$dbc($i) get -set $key] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+
+ # And verify all the cursors, including the one we just
+ # created.
+ for { set j 1 } { $j <= $i } { incr j 2 } {
+ error_check_good dbc_get($j):2 [$dbc($j) get -current] \
+ [list [list $keybase$j \
+ [pad_data $method $j$alphabet]]]
+ }
+ }
+
+ puts "\t\tRecd$tnum.a.1: Verify cursor stability after init."
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i):3 [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\tRecd$tnum.b: Put test."
+ puts "\t\tRecd$tnum.b.1: Put items."
+ set ctxn [$env txn -parent $txn]
+ error_check_good txn [is_valid_txn $ctxn $env] TRUE
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+ error_check_good child_put($i) [$db put -txn $ctxn $key $data] 0
+
+ # If we're a renumbering recno, this is uninteresting.
+ # Stir things up by putting a few additional records at
+ # the beginning.
+ if { [is_rrecno $method] == 1 } {
+ set curs [$db cursor -txn $ctxn]
+ error_check_bad llength_get_first \
+ [llength [$curs get -first]] 0
+ error_check_good cursor [is_valid_cursor $curs $db] TRUE
+ # expect a recno!
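+			# (put -before on a renumbering recno reports the
+			# record number assigned to the new item; since the
+			# cursor is on the first record, that is always 1.)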
+ error_check_good rrecno_put($i) \
+ [$curs put -before ADDITIONAL.$i] 1
+ error_check_good curs_close [$curs close] 0
+ }
+ }
+
+ puts "\t\tRecd$tnum.b.2: Verify cursor stability after abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i):4 [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ # Clean up cursors.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc($i)_close [$dbc($i) close] 0
+ }
+
+ # Sync and verify.
+ error_check_good txn_commit [$txn commit] 0
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_verify \
+ [verify_dir $testdir "\t\tRecd$tnum.b.3: " 0 0 $nodump] 0
+
+ # Now put back all the even records, this time in the parent.
+ # Commit and re-begin the transaction so we can abort and
+ # get back to a nice full database.
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+ error_check_good child_put($i) [$db put -txn $txn $key $data] 0
+ }
+ error_check_good txn_commit [$txn commit] 0
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+
+ # Delete test. Set a cursor to each record. Delete the even ones
+ # in the parent and check cursor stability. Then open a child
+ # transaction, and delete the odd ones. Verify that the database
+ # is empty.
+ puts "\tRecd$tnum.c: Delete test."
+ unset dbc
+
+ # Create cursors pointing at each item.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good dbc($i)_create [is_valid_cursor $dbc($i) $db] \
+ TRUE
+ error_check_good dbc_getset($i) [$dbc($i) get -set $keybase$i] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\t\tRecd$tnum.c.1: Delete even items in child txn and abort."
+
+ if { [is_rrecno $method] != 1 } {
+ set init 2
+ set bound [expr 2 * $nitems]
+ set step 2
+ } else {
+ # In rrecno, deletes will renumber the items, so we have
+ # to take that into account when we delete by recno.
+ set init 2
+ set bound [expr $nitems + 1]
+ set step 1
+ }
+
+ set ctxn [$env txn -parent $txn]
+ for { set i $init } { $i <= $bound } { incr i $step } {
+ error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ # Verify that no items are deleted.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good dbc_get($i):5 [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\t\tRecd$tnum.c.2: Delete even items in child txn and commit."
+ set ctxn [$env txn -parent $txn]
+ for { set i $init } { $i <= $bound } { incr i $step } {
+ error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
+ }
+ error_check_good ctxn_commit [$ctxn commit] 0
+
+ # Verify that even items are deleted and odd items are not.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ set j [expr ($i - 1) / 2 + 1]
+ }
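+		# (Once the even records are deleted and committed, the
+		# surviving odd records are renumbered 1, 2, 3, ..., so
+		# original record $i is record ($i - 1) / 2 + 1 for rrecno.)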
+ error_check_good dbc_get($i):6 [$dbc($i) get -current] \
+ [list [list $keybase$j [pad_data $method $i$alphabet]]]
+ }
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i):7 [$dbc($i) get -current] ""
+ }
+
+ puts "\t\tRecd$tnum.c.3: Delete odd items in child txn."
+
+ set ctxn [$env txn -parent $txn]
+
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ # If this is an rrecno, just delete the first
+ # item repeatedly--the renumbering will make
+ # that delete everything.
+ set j 1
+ }
+ error_check_good del($i) [$db del -txn $ctxn $keybase$j] 0
+ }
+
+ # Verify that everyone's deleted.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good get_deleted($i) \
+ [llength [$db get -txn $ctxn $keybase$i]] 0
+ }
+
+ puts "\t\tRecd$tnum.c.4: Verify cursor stability after abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ # Verify that even items are deleted and odd items are not.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ set j [expr ($i - 1) / 2 + 1]
+ }
+ error_check_good dbc_get($i):8 [$dbc($i) get -current] \
+ [list [list $keybase$j [pad_data $method $i$alphabet]]]
+ }
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i):9 [$dbc($i) get -current] ""
+ }
+
+ # Clean up cursors.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good dbc($i)_close [$dbc($i) close] 0
+ }
+
+ # Sync and verify.
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_verify \
+ [verify_dir $testdir "\t\tRecd$tnum.c.5: " 0 0 $nodump] 0
+
+ puts "\tRecd$tnum.d: Clean up."
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good env_close [$env close] 0
+ error_check_good verify_dir \
+ [verify_dir $testdir "\t\tRecd$tnum.d.1: " 0 0 $nodump] 0
+
+ if { $log_log_record_types == 1 } {
+ logtrack_read $testdir
+ }
+}
diff --git a/db-4.8.30/test/recd014.tcl b/db-4.8.30/test/recd014.tcl
new file mode 100644
index 0000000..651ac9b
--- /dev/null
+++ b/db-4.8.30/test/recd014.tcl
@@ -0,0 +1,446 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd014
+# TEST	This is a recovery test for create/delete of queue extents.  We
+# TEST	then need to recover and make sure the file correctly exists
+# TEST	or does not, as the case may be.
+proc recd014 { method args} {
+ global fixed_len
+ source ./include.tcl
+
+	if { [is_queueext $method] != 1 } {
+ puts "Recd014: Skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd014: skipping for specific pagesizes"
+ return
+ }
+
+ set orig_fixed_len $fixed_len
+ #
+ # We will use 512-byte pages, to be able to control
+ # when extents get created/removed.
+ #
+ set fixed_len 300
+
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+ #
+ # We want to set -extent 1 instead of what
+ # convert_args gave us.
+ #
+ set exti [lsearch -exact $opts "-extent"]
+ incr exti
+ set opts [lreplace $opts $exti $exti 1]
+
+ puts "Recd014: $method extent creation/deletion tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd014.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd014.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+
+ puts "\tRecd014.b: Create test commit"
+ ext_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile commit
+ puts "\tRecd014.b: Create test abort"
+ ext_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile abort
+
+ puts "\tRecd014.c: Consume test commit"
+ ext_recover_consume $testdir $env_cmd $omethod \
+ $opts $testfile commit
+ puts "\tRecd014.c: Consume test abort"
+ ext_recover_consume $testdir $env_cmd $omethod \
+ $opts $testfile abort
+
+ set fixed_len $orig_fixed_len
+ puts "\tRecd014.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
+ global log_log_record_types
+ global fixed_len
+ global alphabet
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+
+ set init_file $dir/$dbfile.init
+ set noenvflags "-create $method -mode 0644 -pagesize 512 $opts $dbfile"
+ set oflags "-env $env $noenvflags"
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+
+ set ret [catch {eval {berkdb_open} -txn $t $oflags} db]
+ error_check_good txn_commit [$t commit] 0
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+
+ #
+ # The command to execute to create an extent is a put.
+ # We are just creating the first one, so our extnum is 0.
+ # extnum must be in the format that make_ext_file expects,
+ # but we just leave out the file name.
+ #
+ set extnum "/__dbq..0"
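+	# (make_ext_filename splices the database file name into that
+	# string, so the first extent should appear on disk as
+	# __dbq.<dbfile>.0.)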
+ set data [chop_data $method [replicate $alphabet 512]]
+ puts "\t\tExecuting command"
+ set putrecno [$db put -txn $t -append $data]
+ error_check_good db_put $putrecno 1
+
+ # Sync the db so any changes to the file that are
+ # in mpool get written to the disk file before the
+ # diff.
+ puts "\t\tSyncing"
+ error_check_good db_sync [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+
+ error_check_good txn_$txncmd:$t [$t $txncmd] 0
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file created.
+ #
+ set dbq [make_ext_filename $dir $dbfile $extnum]
+ error_check_good extput:exists1 [file exists $dbq] 1
+ set ret [$db get $putrecno]
+ if {$txncmd == "abort"} {
+ #
+ # Operation was aborted. Verify our entry is not there.
+ #
+ puts "\t\tCommand executed and aborted."
+ error_check_good db_get [llength $ret] 0
+ } else {
+ #
+ # Operation was committed, verify it exists.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good db_get [llength $ret] 1
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+ }
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+ error_check_good db_close [$db close] 0
+ error_check_good txn_commit [$t commit] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+	# the file still does or does not exist, as appropriate, when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (no-op) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ #
+ # Verify it did not change.
+ #
+ error_check_good extput:exists2 [file exists $dbq] 1
+ ext_create_check $dir $txncmd $init_file $dbfile $noenvflags $putrecno
+
+ #
+ # Need a new copy to get the right LSN into the file.
+ #
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+
+ #
+ # Undo.
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ file copy -force $dir/$dbfile.afterop $dir/$dbfile
+ move_file_extent $dir $dbfile afterop copy
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (afterop) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ ext_create_check $dir $txncmd $init_file $dbfile $noenvflags $putrecno
+
+ #
+ # To redo, remove the dbfiles. Run recovery again.
+ #
+ catch { file rename -force $dir/$dbfile $dir/$dbfile.renamed } res
+ copy_extent_file $dir $dbfile renamed rename
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (init) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ #
+ # !!!
+ # Even though db_recover exits with status 0, it should print out
+ # a warning because the file didn't exist. Db_recover writes this
+ # to stderr. Tcl assumes that ANYTHING written to stderr is an
+ # error, so even though we exit with 0 status, we still get an
+ # error back from 'catch'. Look for the warning.
+ #
+ if { $stat == 1 && [is_substr $result "warning"] == 0 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+
+ #
+ # Verify it was redone. However, since we removed the files
+ # to begin with, recovery with abort will not recreate the
+ # extent. Recovery with commit will.
+ #
+ if {$txncmd == "abort"} {
+ error_check_good extput:exists3 [file exists $dbq] 0
+ } else {
+ error_check_good extput:exists3 [file exists $dbq] 1
+ }
+}
+
+proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } {
+ if { $txncmd == "commit" } {
+ #
+ # Operation was committed. Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation aborted. The file is there, but make
+ # sure the item is not.
+ #
+ set xdb [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $xdb] TRUE
+ set ret [$xdb get $putrecno]
+ error_check_good db_get [llength $ret] 0
+ error_check_good db_close [$xdb close] 0
+ }
+}
+
+proc ext_recover_consume { dir env_cmd method opts dbfile txncmd} {
+ global log_log_record_types
+ global alphabet
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+
+ set oflags "-create -auto_commit $method -mode 0644 -pagesize 512 \
+ -env $env $opts $dbfile"
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set extnum "/__dbq..0"
+ set data [chop_data $method [replicate $alphabet 512]]
+
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set putrecno [$db put -txn $txn -append $data]
+ error_check_good db_put $putrecno 1
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\t\tExecuting command"
+
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file removed until recovery is run.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+
+ set dbcmd "$db get -txn $t -consume"
+ set ret [eval $dbcmd]
+ error_check_good db_sync [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+
+ error_check_good txn_$txncmd:$t [$t $txncmd] 0
+ error_check_good db_sync [$db sync] 0
+ set dbq [make_ext_filename $dir $dbfile $extnum]
+ if {$txncmd == "abort"} {
+ #
+ # Operation was aborted, verify ext did not change.
+ #
+ puts "\t\tCommand executed and aborted."
+
+ #
+ # Check that the file exists. Final state.
+ # Since we aborted the txn, we should be able
+ # to get to our original entry.
+ #
+ error_check_good postconsume.1 [file exists $dbq] 1
+ error_check_good \
+ diff(init,postconsume.2):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and committed."
+ #
+ # Check file existence. Consume operations remove
+ # the extent when we move off, which we should have
+ # done.
+ error_check_good consume_exists [file exists $dbq] 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here on what we ended up with. Should be a no-op.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (no-op) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist. Both operations should result
+ # in no file existing now that we've run recovery.
+ #
+ error_check_good after_recover1 [file exists $dbq] 0
+ }
+
+ #
+ # Run recovery here. Re-do the operation.
+ # Verify that the file doesn't exist
+ # (if we committed) or change (if we aborted)
+ # when we are done.
+ #
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (init) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist. Both operations should result
+ # in no file existing now that we've run recovery.
+ #
+ error_check_good after_recover2 [file exists $dbq] 0
+ }
+
+ #
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ set filecopy [glob $dir/*.afterop]
+ set afterop [lindex $filecopy 0]
+ file rename -force $afterop $dir/$dbfile
+ set afterop [string range $afterop \
+ [expr [string last "/" $afterop] + 1] \
+ [string last "." $afterop]]
+ move_file_extent $dir $dbfile afterop rename
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (afterop) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it still does
+ # not exist.
+ #
+ error_check_good after_recover3 [file exists $dbq] 0
+ }
+}
diff --git a/db-4.8.30/test/recd015.tcl b/db-4.8.30/test/recd015.tcl
new file mode 100644
index 0000000..699afcb
--- /dev/null
+++ b/db-4.8.30/test/recd015.tcl
@@ -0,0 +1,151 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd015
+# TEST This is a recovery test for testing lots of prepared txns.
+# TEST This test is to force the use of txn_recover to call with the
+# TEST DB_FIRST flag and then DB_NEXT.
+proc recd015 { method args } {
+ source ./include.tcl
+ global rand_init
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd015: $method ($args) prepared txns test"
+
+ # Create the database and environment.
+
+ set numtxns 1
+ set testfile NULL
+
+ set env_cmd "berkdb_env -create -txn -home $testdir"
+ set msg "\tRecd015.a"
+ foreach op { abort commit discard } {
+ puts "$msg: Simple test to prepare $numtxns txn with $op "
+ env_cleanup $testdir
+ recd015_body $env_cmd $testfile $numtxns $msg $op
+ }
+
+ #
+ # Now test large numbers of prepared txns to test DB_NEXT
+ # on txn_recover.
+ #
+ set numtxns 10000
+ set txnmax [expr $numtxns + 5]
+ set env_cmd "berkdb_env -create -txn_max $txnmax \
+ -lock_max_lockers $txnmax -txn -home $testdir"
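+	# ($txnmax is $numtxns plus a little headroom, presumably because
+	# the test's own housekeeping needs a few transactions and lockers
+	# beyond the prepared ones.)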
+
+ set msg "\tRecd015.b"
+ foreach op { abort commit discard } {
+ puts "$msg: Large test to prepare $numtxns txn with $op"
+ env_cleanup $testdir
+ recd015_body $env_cmd $testfile $numtxns $msg $op
+ }
+
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $testdir/LOG } ret]
+ error_check_good db_printlog $stat 0
+ fileremove $testdir/LOG
+}
+
+proc recd015_body { env_cmd testfile numtxns msg op } {
+ source ./include.tcl
+
+ sentinel_init
+ set gidf $testdir/gidfile
+ fileremove -f $gidf
+ set pidlist {}
+ puts "$msg.0: Executing child script to prepare txns"
+ berkdb debug_check
+ set p [exec $tclsh_path $test_path/wrap.tcl recd15scr.tcl \
+ $testdir/recdout $env_cmd $testfile $gidf $numtxns &]
+
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/recdout r]
+ set r [read $f1]
+ puts $r
+ close $f1
+ fileremove -f $testdir/recdout
+
+ berkdb debug_check
+ puts -nonewline "$msg.1: Running recovery ... "
+ flush stdout
+ berkdb debug_check
+ set env [eval $env_cmd -recover]
+ error_check_good dbenv-recover [is_valid_env $env] TRUE
+ puts "complete"
+
+ puts "$msg.2: getting txns from txn_recover"
+ set txnlist [$env txn_recover]
+ error_check_good txnlist_len [llength $txnlist] $numtxns
+
+ set gfd [open $gidf r]
+ set i 0
+ while { [gets $gfd gid] != -1 } {
+ set gids($i) $gid
+ incr i
+ }
+ close $gfd
+ #
+ # Make sure we have as many as we expect
+ error_check_good num_gids $i $numtxns
+
+ set i 0
+ puts "$msg.3: comparing GIDs and $op txns"
+ foreach tpair $txnlist {
+ set txn [lindex $tpair 0]
+ set gid [lindex $tpair 1]
+ error_check_good gidcompare $gid $gids($i)
+ error_check_good txn:$op [$txn $op] 0
+ incr i
+ }
+ if { $op != "discard" } {
+ error_check_good envclose [$env close] 0
+ return
+ }
+ #
+ # If we discarded, now do it again and randomly resolve some
+ # until all txns are resolved.
+ #
+ puts "$msg.4: resolving/discarding txns"
+ set txnlist [$env txn_recover]
+ set len [llength $txnlist]
+ set opval(1) "abort"
+ set opcnt(1) 0
+ set opval(2) "commit"
+ set opcnt(2) 0
+ set opval(3) "discard"
+ set opcnt(3) 0
+ while { $len != 0 } {
+ set opicnt(1) 0
+ set opicnt(2) 0
+ set opicnt(3) 0
+ #
+ # Abort/commit or discard them randomly until
+ # all are resolved.
+ #
+ for { set i 0 } { $i < $len } { incr i } {
+ set t [lindex $txnlist $i]
+ set txn [lindex $t 0]
+ set newop [berkdb random_int 1 3]
+ set ret [$txn $opval($newop)]
+ error_check_good txn_$opval($newop):$i $ret 0
+ incr opcnt($newop)
+ incr opicnt($newop)
+ }
+# puts "$opval(1): $opicnt(1) Total: $opcnt(1)"
+# puts "$opval(2): $opicnt(2) Total: $opcnt(2)"
+# puts "$opval(3): $opicnt(3) Total: $opcnt(3)"
+
+ set txnlist [$env txn_recover]
+ set len [llength $txnlist]
+ }
+
+ error_check_good envclose [$env close] 0
+}
diff --git a/db-4.8.30/test/recd016.tcl b/db-4.8.30/test/recd016.tcl
new file mode 100644
index 0000000..12c69e5
--- /dev/null
+++ b/db-4.8.30/test/recd016.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd016
+# TEST Test recovery after checksum error.
+proc recd016 { method args} {
+ global fixed_len
+ global log_log_record_types
+ global datastr
+ source ./include.tcl
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd016: skipping for specific pagesizes"
+ return
+ }
+ if { [is_queueext $method] == 1 || [is_partitioned $args]} {
+ puts "Recd016: skipping for method $method"
+ return
+ }
+
+ puts "Recd016: $method recovery after checksum error"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd016.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd016.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set pgsize 512
+ set orig_fixed_len $fixed_len
+ set fixed_len [expr $pgsize / 4]
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -auto_commit -chksum -pagesize $pgsize $opts $testfile"
+ set db [eval {berkdb_open} -env $dbenv $oflags]
+
+ #
+ # Put some data.
+ #
+ set nument 50
+ puts "\tRecd016.b: Put some data"
+ for { set i 1 } { $i <= $nument } { incr i } {
+ # Use 'i' as key so method doesn't matter
+ set key $i
+ set data $i$datastr
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good log_flush [$dbenv log_flush] 0
+ error_check_good env_close [$dbenv close] 0
+ #
+ # We need to remove the env so that we don't get cached
+ # pages.
+ #
+ error_check_good env_remove [berkdb envremove -home $testdir] 0
+
+ puts "\tRecd016.c: Overwrite part of database"
+ #
+ # First just touch some bits in the file. We want to go
+ # through the paging system, so touch some data pages,
+ # like the middle of page 2.
+ # We should get a checksum error for the checksummed file.
+ #
+ set pg 2
+ set fid [open $testdir/$testfile r+]
+ fconfigure $fid -translation binary
+ set seeklen [expr $pgsize * $pg + 200]
+ seek $fid $seeklen start
+ set byte [read $fid 1]
+ binary scan $byte c val
+ set newval [expr ~$val]
+ set newbyte [binary format c $newval]
+ seek $fid $seeklen start
+ puts -nonewline $fid $newbyte
+ close $fid
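+	# One byte in the middle of page $pg now has every bit inverted,
+	# so the stored checksum for that page can no longer match the
+	# next time the page is read in.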
+
+ #
+ # Verify we get the checksum error. When we get it, it should
+ # log the error as well, so when we run recovery we'll need to
+ # do catastrophic recovery. We do this in a sub-process so that
+ # the files are closed after the panic.
+ #
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set env_cmd "berkdb_env_noerr $flags"
+ set dbenv [send_cmd $f1 $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [send_cmd $f1 "{berkdb_open_noerr} -env $dbenv $oflags"]
+ error_check_good db [is_valid_db $db] TRUE
+
+	# We need to set non-blocking mode so that after each command
+	# we can read all of that command's remaining output and know
+	# which output belongs to which command.
+ fconfigure $f1 -blocking 0
+ set ret [read $f1]
+ set got_err 0
+ for { set i 1 } { $i <= $nument } { incr i } {
+ set stat [send_cmd $f1 "catch {$db get $i} r"]
+ set getret [send_cmd $f1 "puts \$r"]
+ set ret [read $f1]
+ if { $stat == 1 } {
+ error_check_good dbget:fail [is_substr $getret \
+ "checksum error: page $pg"] 1
+ set got_err 1
+ break
+ } else {
+ set key [lindex [lindex $getret 0] 0]
+ set data [lindex [lindex $getret 0] 1]
+ error_check_good keychk $key $i
+ error_check_good datachk $data \
+ [pad_data $method $i$datastr]
+ }
+ }
+ error_check_good got_chksum $got_err 1
+ set ret [send_cmd $f1 "$db close"]
+ set extra [read $f1]
+ error_check_good db:fail [is_substr $ret "run recovery"] 1
+
+ set ret [send_cmd $f1 "$dbenv close"]
+ error_check_good env_close:fail [is_substr $ret "handles still open"] 1
+ close $f1
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $testdir
+ }
+
+ puts "\tRecd016.d: Run normal recovery"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 1
+ error_check_good dbrec:fail \
+ [is_substr $r "checksum error"] 1
+
+ catch {fileremove $testdir/$testfile} ret
+ puts "\tRecd016.e: Run catastrophic recovery"
+ set ret [catch {exec $util_path/db_recover -c -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ #
+ # Now verify the data was reconstructed correctly.
+ #
+ set env_cmd "berkdb_env_noerr $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [eval {berkdb_open} -env $dbenv $oflags]
+ error_check_good db [is_valid_db $db] TRUE
+
+ for { set i 1 } { $i <= $nument } { incr i } {
+ set stat [catch {$db get $i} ret]
+ error_check_good stat $stat 0
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good keychk $key $i
+ error_check_good datachk $data [pad_data $method $i$datastr]
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good log_flush [$dbenv log_flush] 0
+ error_check_good env_close [$dbenv close] 0
+ set fixed_len $orig_fixed_len
+ return
+}
diff --git a/db-4.8.30/test/recd017.tcl b/db-4.8.30/test/recd017.tcl
new file mode 100644
index 0000000..6ba6fd6
--- /dev/null
+++ b/db-4.8.30/test/recd017.tcl
@@ -0,0 +1,157 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd017
+# TEST Test recovery and security. This is basically a watered
+# TEST down version of recd001 just to verify that encrypted environments
+# TEST can be recovered.
+proc recd017 { method {select 0} args} {
+ global fixed_len
+ global encrypt
+ global passwd
+ global has_crypto
+ source ./include.tcl
+
+ # Skip test if release does not support encryption.
+ if { $has_crypto == 0 } {
+ puts "Skipping recd017 for non-crypto release."
+ return
+ }
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd017: $method operation/transaction tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ # The recovery tests were originally written to
+ # do a command, abort, do it again, commit, and then
+ # repeat the sequence with another command. Each command
+ # tends to require that the previous command succeeded and
+ # left the database a certain way. To avoid cluttering up the
+ # op_recover interface as well as the test code, we create two
+ # databases; one does abort and then commit for each op, the
+	# other does prepare-abort, prepare-discard, and prepare-commit for
+	# each op. If all goes well, this allows each command to depend on
+	# exactly one successful iteration of the previous command.
+ set testfile recd017.db
+ set testfile2 recd017-2.db
+
+ set flags "-create -encryptaes $passwd -txn -home $testdir"
+
+ puts "\tRecd017.a.0: creating environment"
+ set env_cmd "berkdb_env $flags"
+ convert_encrypt $env_cmd
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+ # cannot specify db truncate in txn protected env!!!
+ set opts [convert_args $method $args]
+ convert_encrypt $env_cmd
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ error_check_good env_close [$dbenv close] 0
+
+ puts "\tRecd017.a.1: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir -P $passwd \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {DB put -txn TXNID $key $data} "Recd017.b: put"}
+ { {DB del -txn TXNID $key} "Recd017.c: delete"}
+ }
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd017_key
+ }
+ set data recd017_data
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+
+ if { [is_queue $method] != 1 } {
+ if { [string first append $cmd] != -1 } {
+ continue
+ }
+ if { [string first consume $cmd] != -1 } {
+ continue
+ }
+ }
+
+# if { [is_fixed_length $method] == 1 } {
+# if { [string first partial $cmd] != -1 } {
+# continue
+# }
+# }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg $args
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg $args
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg $args
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg $args
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg $args
+ }
+ set fixed_len $orig_fixed_len
+ return
+}
diff --git a/db-4.8.30/test/recd018.tcl b/db-4.8.30/test/recd018.tcl
new file mode 100644
index 0000000..fd960c7
--- /dev/null
+++ b/db-4.8.30/test/recd018.tcl
@@ -0,0 +1,109 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd018
+# TEST Test recover of closely interspersed checkpoints and commits.
+#
+# This test is from the error case from #4230.
+#
+proc recd018 { method {ndbs 10} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum "018"
+
+ puts "Recd$tnum ($args): $method recovery of checkpoints and commits."
+
+ set tname recd$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ set key2 2
+ } else {
+ set key KEY
+ set key2 KEY2
+ }
+
+ puts "\tRecd$tnum.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set db($i) [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db($i)] TRUE
+ set file $testdir/$testfile.init
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile init
+ }
+
+ # Main loop: Write a record or two to each database.
+ # Do a commit immediately followed by a checkpoint after each one.
+ error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
+
+ puts "\tRecd$tnum.b Put/Commit/Checkpoint to $ndbs databases"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db($i) put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
+ if { [expr $i % 2] == 0 } {
+ set txn [$dbenv txn]
+ error_check_good txn2 [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put [$db($i) put \
+ -txn $txn $key2 [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
+ }
+ error_check_good db_close [$db($i) close] 0
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile afterop
+ }
+ error_check_good env_close [$dbenv close] 0
+
+	# Now run recovery against each saved version of the files
+	# (no-op, initial, and afterop) and verify it succeeds.
+ puts "\tRecd$tnum.c: Run recovery (no-op)"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd$tnum.d: Run recovery (initial file)"
+ for { set i 0 } {$i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set file $testdir/$testfile.init
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile init copy
+ }
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd$tnum.e: Run recovery (after file)"
+ for { set i 0 } {$i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile afterop copy
+ }
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+}
diff --git a/db-4.8.30/test/recd019.tcl b/db-4.8.30/test/recd019.tcl
new file mode 100644
index 0000000..d9b8952
--- /dev/null
+++ b/db-4.8.30/test/recd019.tcl
@@ -0,0 +1,122 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd019
+# TEST Test txn id wrap-around and recovery.
+proc recd019 { method {numid 50} args} {
+ global fixed_len
+ global txn_curid
+ global log_log_record_types
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd019: $method txn id wrap-around test"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd019.db
+
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd019.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # Test txn wrapping. Force a txn_recycle msg.
+ #
+ set new_curid $txn_curid
+ set new_maxid [expr $new_curid + $numid]
+ error_check_good txn_id_set [$dbenv txn_id_set $new_curid $new_maxid] 0
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+ # cannot specify db truncate in txn protected env!!!
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -auto_commit $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ #
+ # Force txn ids to wrap twice and then some.
+ #
+ set nument [expr $numid * 3 - 2]
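+	# (txn_id_set above restricted the id space to $numid ids, so
+	# running roughly three times that many single-put transactions
+	# forces the id counter to be recycled at least twice.)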
+ puts "\tRecd019.b: Wrapping txn ids after $numid"
+ set file $testdir/$testfile.init
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile init
+ for { set i 1 } { $i <= $nument } { incr i } {
+ # Use 'i' as key so method doesn't matter
+ set key $i
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile afterop
+ error_check_good env_close [$dbenv close] 0
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $testdir
+ }
+
+ # Now, loop through and recover.
+ puts "\tRecd019.c: Run recovery (no-op)"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd019.d: Run recovery (initial file)"
+ set file $testdir/$testfile.init
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile init copy
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd019.e: Run recovery (after file)"
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile afterop copy
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+ set fixed_len $orig_fixed_len
+ return
+}
diff --git a/db-4.8.30/test/recd020.tcl b/db-4.8.30/test/recd020.tcl
new file mode 100644
index 0000000..7e5941c
--- /dev/null
+++ b/db-4.8.30/test/recd020.tcl
@@ -0,0 +1,81 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd020
+# TEST Test creation of intermediate directories -- an
+# TEST undocumented, UNIX-only feature.
+#
+proc recd020 { method args } {
+ source ./include.tcl
+ global tcl_platform
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum "020"
+ set nentries 10
+
+ if { $tcl_platform(platform) != "unix" } {
+ puts "Skipping recd$tnum for non-UNIX platform."
+ return
+ }
+
+ puts "Recd$tnum ($method):\
+ Test creation of intermediate directories in recovery."
+
+ # Create the original intermediate directory.
+ env_cleanup $testdir
+ set intdir INTDIR
+ file mkdir $testdir/$intdir
+
+ set testfile recd$tnum.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd$tnum.a: Create environment and populate database."
+ set env_cmd "berkdb_env $flags"
+ set env [eval $env_cmd]
+ error_check_good env [is_valid_env $env] TRUE
+
+ set db [eval berkdb_open \
+ -create $omethod $args -env $env -auto_commit $intdir/$testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set txn [$env txn]
+ set data "data"
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ error_check_good db_put [eval \
+ {$db put} -txn $txn $i [chop_data $method $data.$i]] 0
+ }
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good env_close [$env close] 0
+
+ puts "\tRecd$tnum.b: Remove intermediate directory."
+ error_check_good directory_there [file exists $testdir/$intdir] 1
+ file delete -force $testdir/$intdir
+ error_check_good directory_gone [file exists $testdir/$intdir] 0
+
+ puts "\tRecd020.c: Run recovery, recreating intermediate directory."
+ set env [eval $env_cmd -set_intermediate_dir_mode "rwxr-x--x" -recover]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tRecd020.d: Reopen test file to verify success."
+ set db [eval {berkdb_open} -env $env $args $intdir/$testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ set ret [$db get $i]
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good key $k $i
+ error_check_good data $d [pad_data $method $data.$i]
+ }
+
+ # Clean up.
+ error_check_good db_close [$db close] 0
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good env_close [$env close] 0
+
+}
diff --git a/db-4.8.30/test/recd021.tcl b/db-4.8.30/test/recd021.tcl
new file mode 100644
index 0000000..b6eae16
--- /dev/null
+++ b/db-4.8.30/test/recd021.tcl
@@ -0,0 +1,278 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd021
+# TEST Test of failed opens in recovery.
+# TEST
+# TEST If a file was deleted through the file system (and not
+# TEST within Berkeley DB), an error message should appear.
+# TEST Test for regular files and subdbs.
+
+proc recd021 { method args } {
+ source ./include.tcl
+ global util_path
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set nentries 100
+
+ puts "\nRecd021: ($method)\
+ Test failed opens in recovery."
+
+ # The file ops "remove" and "rename" are done within
+ # Berkeley DB. A "delete" is done directly on the file
+ # system (as if the user deleted the file).
+ #
+ # First test regular files.
+ #
+ foreach op { remove rename delete noop } {
+ env_cleanup $testdir
+ puts "\tRecd021: Test $op of file in recovery."
+
+ # Create transactional environment.
+ set env [berkdb_env -create -home $testdir -txn]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+
+ # Create database
+ puts "\t\tRecd021.a.1: Create and populate file."
+
+ if { $op == "rename" } {
+ set names {A B}
+ } else {
+ set names {A}
+ }
+ set name [lindex $names 0]
+
+ set db [eval {berkdb_open \
+ -create} $omethod $args -env $env -auto_commit $name.db]
+ error_check_good dba_open [is_valid_db $db] TRUE
+
+ # Checkpoint.
+ error_check_good txn_checkpoint [$env txn_checkpoint] 0
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ error_check_good dba_put [$db put $i data$i] 0
+ }
+ error_check_good dba_close [$db close] 0
+
+ # Do operation on file.
+ puts "\t\tRecd021.b: Do $op on file."
+ set txn [$env txn]
+ set ret [do_op $omethod $op $names $txn $env]
+ error_check_good do_op $ret 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good env_close [$env close] 0
+
+ # Recover.
+ puts "\t\tRecd021.c: Recover."
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ if { $op == "delete" } {
+ error_check_good external_delete \
+ [is_substr $r "Warning: open failed"] 1
+ } else {
+ error_check_good $op $ret 0
+ }
+
+ # Clean up.
+ error_check_good \
+ env_remove [berkdb envremove -force -home $testdir] 0
+ fileremove -f $testdir/$name.db
+ }
+
+ # Test subdbs.
+ if { [is_queue $method] == 1 } {
+ puts "Recd021: Skipping test of subdbs for method $method."
+ return
+ }
+
+ # The first subdb test just does the op, and is comparable
+ # to the tests for regular files above.
+ set trunc 0
+ set special {}
+ foreach op { remove rename delete noop } {
+ recd021_testsubdb $method $op $nentries $special $trunc $args
+ }
+
+ # The remainder of the tests are executed first with the log intact,
+ # then with the log truncated at the __db_subdb_name record.
+ foreach trunc { 0 1 } {
+ # Test what happens if subdb2 reuses pages formerly in
+ # subdb1, after removing subdb1.
+ set special "reuse"
+ recd021_testsubdb $method remove $nentries $special $trunc $args
+
+ # Test what happens if a new subdb reuses pages formerly
+ # in subdb1, after removing subdb1.
+ set special "newdb"
+ recd021_testsubdb $method remove $nentries $special $trunc $args
+
+	# Now we test what happens if a new subdb of a different access
+ # method reuses pages formerly in subdb1, after removing subdb1.
+ set special "newtypedb"
+ recd021_testsubdb $method remove $nentries $special $trunc $args
+ }
+}
+
+proc recd021_testsubdb { method op nentries special trunc largs } {
+ source ./include.tcl
+ global util_path
+
+ set omethod [convert_method $method]
+ env_cleanup $testdir
+
+ puts "\tRecd021: \
+ Test $op of subdb in recovery ($special trunc = $trunc)."
+
+ # Create transactional environment.
+ set env [berkdb_env -create -home $testdir -txn]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+
+ # Create database with 2 subdbs
+ puts "\t\tRecd021.d: Create and populate subdbs."
+ set sname1 S1
+ set sname2 S2
+ if { $op == "rename" } {
+ set names {A S1 NEW_S1}
+ } elseif { $op == "delete" } {
+ set names {A}
+ } else {
+ set names {A S1}
+ }
+ set name [lindex $names 0]
+
+ set sdb1 [eval {berkdb_open -create} $omethod \
+ $largs -env $env -auto_commit $name.db $sname1]
+ error_check_good sdb1_open [is_valid_db $sdb1] TRUE
+ set sdb2 [eval {berkdb_open -create} $omethod \
+ $largs -env $env -auto_commit $name.db $sname2]
+ error_check_good sdb2_open [is_valid_db $sdb2] TRUE
+
+ # Checkpoint.
+ error_check_good txn_checkpoint [$env txn_checkpoint] 0
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ error_check_good sdb1_put [$sdb1 put $i data$i] 0
+ }
+ set dumpfile dump.s1.$trunc
+ set ret [exec $util_path/db_dump -dar -f $dumpfile -h $testdir A.db]
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ error_check_good sdb2_put [$sdb2 put $i data$i] 0
+ }
+ error_check_good sdb1_close [$sdb1 close] 0
+
+ # Do operation on subdb.
+ puts "\t\tRecd021.e: Do $op on file."
+ set txn [$env txn]
+
+ if { $trunc == 1 } {
+ # Create a log cursor to mark where we are before
+ # doing the op.
+ set logc [$env log_cursor]
+ set ret [lindex [$logc get -last] 0]
+ file copy -force $testdir/log.0000000001 $testdir/log.sav
+ }
+
+ set ret [do_subdb_op $omethod $op $names $txn $env]
+ error_check_good do_subdb_op $ret 0
+ error_check_good txn_commit [$txn commit] 0
+
+ if { $trunc == 1 } {
+ # Walk the log and find the __db_subdb_name entry.
+ set found 0
+ while { $found == 0 } {
+ set lsn [lindex [$logc get -next] 0]
+ set lfile [lindex $lsn 0]
+ set loff [lindex $lsn 1]
+ set logrec [exec $util_path/db_printlog -h $testdir \
+ -b $lfile/$loff -e $lfile/$loff]
+ if { [is_substr $logrec __db_subdb_name] == 1 } {
+ set found 1
+ }
+ }
+ # Create the truncated log, and save it for later.
+ catch [exec dd if=$testdir/log.0000000001 \
+ of=$testdir/log.sav count=$loff bs=1 >& /dev/null ] res
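+		# (With bs=1, count=$loff copies exactly $loff bytes, so
+		# log.sav ends just before the __db_subdb_name record
+		# located above.)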
+ }
+
+ # Here we do the "special" thing, if any. We always
+ # have to close sdb2, but when we do so varies.
+ switch -exact -- $special {
+ "" {
+ error_check_good sdb2_close [$sdb2 close] 0
+ }
+ reuse {
+ for { set i [expr $nentries + 1] } \
+ { $i <= [expr $nentries * 2]} { incr i } {
+ error_check_good sdb2_put \
+ [$sdb2 put $i data$i] 0
+ }
+ error_check_good sdb2_close [$sdb2 close] 0
+ set dumpfile dump.s2.$trunc
+ set ret [exec $util_path/db_dump -dar \
+ -f $dumpfile -h $testdir A.db]
+ }
+ newdb {
+ error_check_good sdb2_close [$sdb2 close] 0
+ set sname3 S3
+ set sdb3 [eval {berkdb_open -create} $omethod \
+ $largs -env $env -auto_commit $name.db $sname3]
+ error_check_good sdb3_open [is_valid_db $sdb3] TRUE
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ error_check_good sdb3_put \
+ [$sdb3 put $i data$i] 0
+ }
+ error_check_good sdb3_close [$sdb3 close] 0
+ }
+ newtypedb {
+ error_check_good sdb2_close [$sdb2 close] 0
+ set sname4 S4
+ set newmethod [different_method $method]
+ set args [convert_args $newmethod]
+ set omethod [convert_method $newmethod]
+ set sdb4 [eval {berkdb_open -create} $omethod \
+ $args -env $env -auto_commit $name.db $sname4]
+ error_check_good sdb4_open [is_valid_db $sdb4] TRUE
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ error_check_good sdb4_put \
+ [$sdb4 put $i data$i] 0
+ }
+ error_check_good sdb4_close [$sdb4 close] 0
+ }
+ }
+
+ # Close the env.
+ error_check_good env_close [$env close] 0
+
+ if { $trunc == 1 } {
+ # Swap in the truncated log.
+ file rename -force $testdir/log.sav $testdir/log.0000000001
+ }
+
+ # Recover.
+ puts "\t\tRecd021.f: Recover."
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ if { $op == "delete" || $trunc == 1 && $special != "newdb" } {
+ error_check_good expect_warning \
+ [is_substr $r "Warning: open failed"] 1
+ } else {
+ error_check_good subdb_$op $ret 0
+ }
+
+ # Clean up.
+ error_check_good env_remove [berkdb envremove -force -home $testdir] 0
+ fileremove -f $testdir/$name.db
+}
+
+proc different_method { method } {
+ # Queue methods are omitted, since this is for subdb testing.
+ set methodlist { -btree -rbtree -recno -frecno -rrecno -hash }
+
+ set method [convert_method $method]
+ set newmethod $method
+ while { $newmethod == $method } {
+ set index [berkdb random_int 0 [expr [llength $methodlist] - 1]]
+ set newmethod [lindex $methodlist $index]
+ }
+ return $newmethod
+}
diff --git a/db-4.8.30/test/recd022.tcl b/db-4.8.30/test/recd022.tcl
new file mode 100644
index 0000000..cdcf109
--- /dev/null
+++ b/db-4.8.30/test/recd022.tcl
@@ -0,0 +1,136 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd022
+# TEST Test that pages allocated by an aborted subtransaction
+# TEST within an aborted prepared parent transaction are returned
+# TEST to the free list after recovery. This exercises
+# TEST __db_pg_prepare in systems without FTRUNCATE. [#7403]
+
+proc recd022 { method args} {
+ global log_log_record_types
+ global fixed_len
+ global is_hp_test
+ source ./include.tcl
+
+ # Skip test for specified page sizes -- we want to
+ # specify our own page size.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd022: Skipping for specific pagesizes"
+ return
+ }
+
+ # Skip the test for HP-UX, where we can't open an env twice.
+ if { $is_hp_test == 1 } {
+ puts "Recd022: Skipping for HP-UX."
+ return
+ }
+
+
+ # Increase size of fixed-length records to match other methods.
+ set orig_fixed_len $fixed_len
+ set fixed_len 53
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd022: ($method) Page allocation and recovery"
+ puts "Recd022: with aborted prepared txns and child txns."
+
+ # Create the database and environment.
+ env_cleanup $testdir
+ set testfile recd022.db
+
+ puts "\tRecd022.a: creating environment"
+ # We open the env and database with _noerr so we don't
+ # get error messages when cleaning up at the end of the test.
+ set env_cmd "berkdb_env_noerr -create -txn -home $testdir"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # Open database with small pages.
+ puts "\tRecd022.b: creating database with small pages"
+ set pagesize 512
+ set oflags "-create $omethod -mode 0644 -pagesize $pagesize \
+ -env $dbenv -auto_commit $opts $testfile"
+ set db [eval {berkdb_open_noerr} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tRecd022.c: start transaction, put some data"
+ set iter 10
+ set datasize 53
+ set data [repeat "a" $datasize]
+ set iter2 [expr $iter * 2]
+
+ # Start parent and child txns.
+ puts "\tRecd022.d: start child txn, put some data"
+ set parent [$dbenv txn]
+ set child1 [$dbenv txn -parent $parent]
+
+ # Child puts some new data.
+ for { set i 1 } {$i <= $iter } { incr i } {
+ eval {$db put} -txn $child1 $i $data
+ }
+
+ # Abort the child txn.
+ puts "\tRecd022.e: abort child txn"
+ error_check_good child1_abort [$child1 abort] 0
+
+ # Start a second child. Put some data, enough to allocate
+ # a new page, then delete it.
+ puts "\tRecd022.f: start second child txn, put some data"
+ set child2 [$dbenv txn -parent $parent]
+ for { set i 1 } { $i <= $iter2 } { incr i } {
+ eval {$db put} -txn $child2 $i $data
+ }
+ for { set i 1 } { $i <= $iter2 } { incr i } {
+ eval {$db del} -txn $child2 $i
+ }
+
+ # Put back half the data.
+ for { set i 1 } { $i <= $iter } { incr i } {
+ eval {$db put} -txn $child2 $i $data
+ }
+
+ # Commit second child
+ puts "\tRecd022.g: commit second child txn, prepare parent"
+ error_check_good child2_commit [$child2 commit] 0
+
+ # Prepare parent
+ error_check_good prepare [$parent prepare "ABC"] 0
+
+ # Recover, then abort the recovered parent txn
+ puts "\tRecd022.h: recover, then abort parent"
+ set env1 [berkdb_env -create -recover -home $testdir -txn]
+ set txnlist [$env1 txn_recover]
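+	# txn_recover returns a list of {txn-handle gid} pairs, one for each
+	# prepared-but-unresolved txn; take the handle from the first pair.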
+ set aborttxn [lindex [lindex $txnlist 0] 0]
+ error_check_good parent_abort [$aborttxn abort] 0
+
+ # Verify database and then clean up. We still need to get
+ # rid of the handles created before recovery.
+ puts "\tRecd022.i: verify and clean up"
+ if { [is_partition_callback $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+ verify_dir $testdir "" 1 0 $nodump
+ set stat [catch {$db close} res]
+ error_check_good db_close [is_substr $res "run recovery"] 1
+ error_check_good env1_close [$env1 close] 0
+ set stat [catch {$dbenv close} res]
+ error_check_good dbenv_close [is_substr $res "run recovery"] 1
+
+ # Track the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $testdir
+ }
+
+ # Set fixed_len back to the global value so we don't
+ # mess up other tests.
+ set fixed_len $orig_fixed_len
+ return
+}
diff --git a/db-4.8.30/test/recd023.tcl b/db-4.8.30/test/recd023.tcl
new file mode 100644
index 0000000..eb9dacc
--- /dev/null
+++ b/db-4.8.30/test/recd023.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd023
+# TEST Test recover of reverse split.
+#
+proc recd023 { method args } {
+ source ./include.tcl
+ env_cleanup $testdir
+ set tnum "023"
+
+ if { [is_btree $method] != 1 && [is_rbtree $method] != 1 } {
+ puts "Skipping recd$tnum for method $method"
+ return
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_partition_callback $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+
+ puts "Recd$tnum ($omethod $args): Recovery of reverse split."
+ set testfile recd$tnum.db
+
+ puts "\tRecd$tnum.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set env [eval $env_cmd]
+ error_check_good env [is_valid_env $env] TRUE
+
+ set pagesize 512
+ set oflags "$omethod -auto_commit \
+ -pagesize $pagesize -create -mode 0644 $args"
+ set db [eval {berkdb_open} -env $env $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Write to database -- enough to fill at least 3 levels.
+ puts "\tRecd$tnum.b: Create a 3 level btree database."
+ set nentries 1000
+ set datastr [repeat x 45]
+ for { set i 1 } { $i < $nentries } { incr i } {
+ set key a$i
+ set ret [$db put $key [chop_data $method $datastr]]
+ error_check_good put $ret 0
+ }
+
+ # Verify we have enough levels.
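+	# (stat_field extracts the named entry -- here the btree "Levels"
+	# count -- from the db handle's stat output.)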
+ set levels [stat_field $db stat "Levels"]
+ error_check_good 3_levels [expr $levels >= 3] 1
+
+ # Save the original database.
+ file copy -force $testdir/$testfile $testdir/$testfile.save
+
+ # Delete enough pieces to collapse the tree.
+ puts "\tRecd$tnum.c: Do deletes to collapse database."
+ for { set count 2 } { $count < 10 } { incr count } {
+ error_check_good db_del [$db del a$count] 0
+ }
+ for { set count 15 } { $count < 100 } { incr count } {
+ error_check_good db_del [$db del a$count] 0
+ }
+ for { set count 150 } { $count < 1000 } { incr count } {
+ error_check_good db_del [$db del a$count] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good verify_dir\
+ [verify_dir $testdir "\tRecd$tnum.d: " 0 0 $nodump] 0
+
+ # Overwrite the current database with the saved database.
+ file copy -force $testdir/$testfile.save $testdir/$testfile
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good env_close [$env close] 0
+
+ # Recover the saved database to roll forward and apply the deletes.
+ set env [berkdb_env -create -txn -home $testdir -recover]
+ error_check_good env_open [is_valid_env $env] TRUE
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good env_close [$env close] 0
+
+ error_check_good verify_dir\
+ [verify_dir $testdir "\tRecd$tnum.e: " 0 0 $nodump] 0
+}
diff --git a/db-4.8.30/test/recd024.tcl b/db-4.8.30/test/recd024.tcl
new file mode 100644
index 0000000..cd18a67
--- /dev/null
+++ b/db-4.8.30/test/recd024.tcl
@@ -0,0 +1,81 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996,2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST recd024
+# TEST Test recovery of streaming partial insert operations. These are
+# TEST operations that do multiple partial puts that append to an existing
+# TEST data item (as long as the data item is on an overflow page).
+# TEST The interesting cases are:
+# TEST * Simple streaming operations
+# TEST * Operations that cause the overflow item to flow onto another page.
+# TEST
+proc recd024 { method args } {
+ source ./include.tcl
+
+ # puts "$args"
+ set envargs ""
+ set pagesize 512
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Recd024 skipping for fixed length access methods."
+ return
+ }
+ set flags "-create -txn -home $testdir $envargs"
+ set env_cmd "berkdb_env $flags"
+
+ set testfile recd024.db
+ set testfile2 recd024-2.db
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd024_key
+ }
+
+ set len 512
+ set part_data [replicate "abcdefgh" 64]
+ set p [list 0 $len]
+	# Insert one 512-byte data item prior to running the command, to get
+	# it onto an overflow page.  Append two more 512-byte data items to
+	# exercise the streaming code.
+ set cmd [subst \
+ {DBC put -txn TXNID -partial "512 512" -current $part_data \
+ NEW_CMD DBC put -txn TXNID -partial "1024 512" -current $part_data \
+ NEW_CMD DBC put -txn TXNID -partial "1536 512" -current $part_data}]
+ set oflags "-create $omethod -mode 0644 $args \
+ -pagesize $pagesize"
+ set msg "Recd024.a: partial put prepopulated/expanding"
+ foreach op {commit abort prepare-abort prepare-discard prepare-commit} {
+ env_cleanup $testdir
+
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+ set t [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $t $dbenv] TRUE
+ set db [eval {berkdb_open} \
+ $oflags -env $dbenv -txn $t $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set db2 [eval {berkdb_open} \
+ $oflags -env $dbenv -txn $t $testfile2]
+ error_check_good db_open [is_valid_db $db2] TRUE
+
+ set ret [$db put -txn $t -partial $p $key $part_data]
+ error_check_good dbput $ret 0
+
+ set ret [$db2 put -txn $t -partial $p $key $part_data]
+ error_check_good dbput $ret 0
+ error_check_good txncommit [$t commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbclose [$db2 close] 0
+ error_check_good dbenvclose [$dbenv close] 0
+
+ op_recover $op $testdir $env_cmd $testfile $cmd $msg \
+ $args
+ }
+ return
+}
+
diff --git a/db-4.8.30/test/recd15scr.tcl b/db-4.8.30/test/recd15scr.tcl
new file mode 100644
index 0000000..c6fc7f2
--- /dev/null
+++ b/db-4.8.30/test/recd15scr.tcl
@@ -0,0 +1,73 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Recd15 - lots of txns - txn prepare script
+# Usage: recd15script envcmd dbfile gidfile numtxns
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# numtxns: number of txns to start
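+#
+# Example arguments (illustrative only):
+#	envcmd:  {berkdb_env -create -txn -home TESTDIR}
+#	dbfile:  recd015.db
+#	gidfile: TESTDIR/gidfile
+#	numtxns: 20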
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "recd15script envcmd dbfile gidfile numtxns"
+
+# Verify usage
+if { $argc != 4 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set envcmd [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set gidfile [ lindex $argv 2 ]
+set numtxns [ lindex $argv 3 ]
+
+set txnmax [expr $numtxns + 5]
+set dbenv [eval $envcmd]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+set usedb 0
+if { $dbfile != "NULL" } {
+ set usedb 1
+ set db [berkdb_open -auto_commit -env $dbenv $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+}
+
+puts "\tRecd015script.a: Begin $numtxns txns"
+for {set i 0} {$i < $numtxns} {incr i} {
+ set t [$dbenv txn]
+ error_check_good txnbegin($i) [is_valid_txn $t $dbenv] TRUE
+ set txns($i) $t
+ if { $usedb } {
+ set dbc [$db cursor -txn $t]
+ error_check_good cursor($i) [is_valid_cursor $dbc $db] TRUE
+ set curs($i) $dbc
+ }
+}
+
+puts "\tRecd015script.b: Prepare $numtxns txns"
+set gfd [open $gidfile w+]
+for {set i 0} {$i < $numtxns} {incr i} {
+ if { $usedb } {
+ set dbc $curs($i)
+ error_check_good dbc_close [$dbc close] 0
+ }
+ set t $txns($i)
+ set gid [make_gid recd015script:$t]
+ puts $gfd $gid
+ error_check_good txn_prepare:$t [$t prepare $gid] 0
+}
+close $gfd
+
+#
+# We do not close the db or env, but exit with the txns outstanding.
+#
+puts "\tRecd015script completed successfully"
+flush stdout
diff --git a/db-4.8.30/test/recdscript.tcl b/db-4.8.30/test/recdscript.tcl
new file mode 100644
index 0000000..0c6d327
--- /dev/null
+++ b/db-4.8.30/test/recdscript.tcl
@@ -0,0 +1,37 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Recovery txn prepare script
+# Usage: recdscript op dir envcmd dbfile gidfile cmd [args]
+# op: primary txn operation
+# dir: test directory
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# cmd: db command to execute
+# args: optional method args passed through to op_recover_prep
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "recdscript op dir envcmd dbfile gidfile cmd"
+
+# Verify usage
+if { $argc < 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set op [ lindex $argv 0 ]
+set dir [ lindex $argv 1 ]
+set envcmd [ lindex $argv 2 ]
+set dbfile [ lindex $argv 3 ]
+set gidfile [ lindex $argv 4 ]
+set cmd [ lindex $argv 5 ]
+set args [ lindex $argv 6 ]
+
+eval {op_recover_prep $op $dir $envcmd $dbfile $gidfile $cmd} $args
+flush stdout
diff --git a/db-4.8.30/test/rep001.tcl b/db-4.8.30/test/rep001.tcl
new file mode 100644
index 0000000..aebf486
--- /dev/null
+++ b/db-4.8.30/test/rep001.tcl
@@ -0,0 +1,229 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep001
+# TEST Replication rename and forced-upgrade test.
+# TEST
+# TEST Run rep_test in a replicated master environment.
+# TEST Verify that the database on the client is correct.
+# TEST Next, remove the database, close the master, upgrade the
+# TEST client, reopen the master, and make sure the new master can
+# TEST correctly run rep_test and propagate it in the other direction.
+
+proc rep001 { method { niter 1000 } { tnum "001" } args } {
+ global passwd
+ global has_crypto
+ global databases_in_memory
+ global repfiles_in_memory
+
+ source ./include.tcl
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ # It's possible to run this test with in-memory databases.
+ set msg "with named databases"
+ if { $databases_in_memory } {
+ set msg "with in-memory named databases"
+ if { [is_queueext $method] == 1 } {
+ puts "Skipping rep$tnum for method $method"
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+	# Run tests with and without recovery.  If we're testing
+	# in-memory logging, skip the combination of recovery and
+	# in-memory logging -- it doesn't make sense.
+ set logsets [create_logsets 2]
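+	# (Illustrative) create_logsets 2 is expected to return every
+	# on-disk/in-memory log combination for the two sites, e.g.
+	# {on-disk on-disk} {on-disk in-memory} {in-memory on-disk}
+	# {in-memory in-memory}.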
+ set saved_args $args
+
+ foreach recopt $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $recopt == "-recover" && $logindex != -1 } {
+ puts "Skipping test with -recover for in-memory logs."
+ continue
+ }
+ set envargs ""
+ set args $saved_args
+ puts -nonewline "Rep$tnum: Replication sanity test "
+ puts "($method $recopt) $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep001_sub $method $niter $tnum $envargs $l $recopt $args
+
+ # Skip encrypted tests if not supported.
+ if { $has_crypto == 0 || $databases_in_memory } {
+ continue
+ }
+
+ # Run the same tests with security. In-memory
+ # databases don't work with encryption.
+ append envargs " -encryptaes $passwd "
+ append args " -encrypt "
+ puts "Rep$tnum: Replication and security sanity test\
+ ($method $recopt)."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep001_sub $method \
+ $niter $tnum $envargs $l $recopt $args
+ }
+ }
+}
+
+proc rep001_sub { method niter tnum envargs logset recargs largs } {
+ source ./include.tcl
+ global testdir
+ global encrypt
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ set verify_subset \
+ [expr { $m_logtype == "in-memory" || $c_logtype == "in-memory" }]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync. Adjust the args for master
+ # and client.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
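+	# (Illustrative) adjust_txnargs yields "-txn nosync" for on-disk logs
+	# and plain "-txn" for in-memory logs; adjust_logargs is expected to
+	# add an in-memory log buffer (e.g. "-log_inmemory -log_buffer <size>")
+	# for in-memory logs and nothing extra for on-disk logs.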
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create $repmemargs \
+ -log_max 1000000 $envargs $m_logargs $recargs $verbargs \
+ -home $masterdir -errpfx MASTER $m_txnargs -rep_master \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+
+ # Open a client
+ repladd 2
+ set env_cmd(C) "berkdb_env_noerr -create $repmemargs \
+ -log_max 1000000 $envargs $c_logargs $recargs $verbargs \
+ -home $clientdir -errpfx CLIENT $c_txnargs -rep_client \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $env_cmd(C)]
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # db_remove in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ # Run rep_test in the master (and update client).
+ puts "\tRep$tnum.a:\
+ Running rep_test in replicated env ($envargs $recargs)."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Verifying client database contents."
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+
+ rep_verify $masterdir $masterenv \
+ $clientdir $clientenv $verify_subset 1 1
+
+ # Remove the file (and update client).
+ puts "\tRep$tnum.c: Remove the file on the master and close master."
+ error_check_good remove \
+ [eval {$masterenv dbremove} -auto_commit $dbname] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ process_msgs $envlist
+
+ puts "\tRep$tnum.d: Upgrade client."
+ set newmasterenv $clientenv
+ error_check_good upgrade_client [$newmasterenv rep_start -master] 0
+
+ # Run rep_test in the new master
+ puts "\tRep$tnum.e: Running rep_test in new master."
+ eval rep_test $method $newmasterenv NULL $niter 0 0 0 $largs
+ set envlist "{$newmasterenv 2}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.f: Reopen old master as client and catch up."
+ # Throttle master so it can't send everything at once
+ $newmasterenv rep_limit 0 [expr 64 * 1024]
+ set newclientenv [eval {berkdb_env_noerr -create -recover} \
+ $envargs $m_logargs $m_txnargs -errpfx NEWCLIENT $verbargs $repmemargs \
+ {-home $masterdir -rep_client -rep_transport [list 1 replsend]}]
+ set envlist "{$newclientenv 1} {$newmasterenv 2}"
+ process_msgs $envlist
+
+ # If we're running with a low number of iterations, we might
+ # not have had to throttle the data transmission; skip the check.
+ if { $niter > 200 } {
+ set nthrottles \
+ [stat_field $newmasterenv rep_stat "Transmission limited"]
+ error_check_bad nthrottles $nthrottles -1
+ error_check_bad nthrottles $nthrottles 0
+ }
+
+ # Run a modified rep_test in the new master (and update client).
+ puts "\tRep$tnum.g: Running rep_test in new master."
+ eval rep_test $method \
+ $newmasterenv NULL $niter $niter $niter 0 $largs
+ process_msgs $envlist
+
+ # Verify the database in the client dir.
+ puts "\tRep$tnum.h: Verifying new client database contents."
+
+ rep_verify $masterdir $newmasterenv \
+ $clientdir $newclientenv $verify_subset 1 1
+
+ error_check_good newmasterenv_close [$newmasterenv close] 0
+ error_check_good newclientenv_close [$newclientenv close] 0
+
+ if { [lsearch $envargs "-encrypta*"] !=-1 } {
+ set encrypt 1
+ }
+ error_check_good verify \
+ [verify_dir $clientdir "\tRep$tnum.k: " 0 0 1] 0
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep002.tcl b/db-4.8.30/test/rep002.tcl
new file mode 100644
index 0000000..8dfc2f2
--- /dev/null
+++ b/db-4.8.30/test/rep002.tcl
@@ -0,0 +1,330 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep002
+# TEST Basic replication election test.
+# TEST
+# TEST Run a modified version of test001 in a replicated master
+# TEST environment; hold an election among a group of clients to
+# TEST make sure they select a proper master from amongst themselves,
+# TEST in various scenarios.
+
+proc rep002 { method { niter 10 } { nclients 3 } { tnum "002" } args } {
+
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Skip for record-based methods.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_record_based $method] != 1 } {
+ lappend test_methods $method
+ }
+ }
+ return $test_methods
+ }
+ if { [is_record_based $method] == 1 } {
+ puts "Rep002: Skipping for method $method."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set logsets [create_logsets [expr $nclients + 1]]
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+	if { $r == "-recover" && $logindex != -1 } {
+	puts "Skipping test with -recover for in-memory logs."
+	continue
+	}
+ puts "Rep$tnum ($method $r): Replication election\
+ test with $nclients clients $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "Rep$tnum: Client $i logs are\
+ [lindex $l [expr $i + 1]]"
+ }
+ rep002_sub $method $niter $nclients $tnum $l $r $args
+ }
+ }
+}
+
+proc rep002_sub { method niter nclients tnum logset recargs largs } {
+ source ./include.tcl
+ global repfiles_in_memory
+ global elect_timeout elect_serial
+ set elect_timeout(default) 5000000
+
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ set m_logtype [lindex $logset 0]
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ set c_logtype($i) [lindex $logset [expr $i + 1]]
+ set c_logargs($i) [adjust_logargs $c_logtype($i)]
+ set c_txnargs($i) [adjust_txnargs $c_logtype($i)]
+ }
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \
+ -event rep_event $repmemargs \
+ -home $masterdir $m_logargs -errpfx MASTER $verbargs \
+ $m_txnargs -rep_master -rep_transport \[list 1 replsend\]"
+	# In an election test, the -recover arg must not go
+	# in the env_cmd string because that string is going to be
+	# passed to a child process.
+ set masterenv [eval $env_cmd(M) $recargs]
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env_noerr -create -home $clientdir($i) \
+ -event rep_event $repmemargs \
+ $c_logargs($i) $c_txnargs($i) -rep_client -errpfx CLIENT$i \
+ $verbargs -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i) $recargs]
+ }
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ set envlist {}
+ lappend envlist "$masterenv 1"
+ for { set i 0 } { $i < $nclients } { incr i } {
+ lappend envlist "$clientenv($i) [expr $i + 2]"
+ }
+ process_msgs $envlist
+
+ # Run a modified test001 in the master.
+ puts "\tRep$tnum.a: Running test001 in replicated env."
+ eval test001 $method $niter 0 0 $tnum -env $masterenv $largs
+ process_msgs $envlist
+
+ # Verify the database in the client dir.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\tRep$tnum.b: Verifying contents of client database $i."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test$tnum.db $clientenv($i) $testdir/t1 \
+ test001.check dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+ verify_dir $clientdir($i) "\tRep$tnum.c: " 0 0 1
+ }
+
+ # Start an election in the first client.
+ puts "\tRep$tnum.d: Starting election with existing master."
+ # We want to verify that the master declares the election
+ # over by fiat, even if everyone uses a lower priority than 20.
+ # Loop and process all messages, keeping track of which
+ # sites got a HOLDELECTION and checking that the master i.d. is
+ # unchanged after the election.
+
+ set origrole [stat_field $masterenv rep_stat "Role"]
+ error_check_good originally_master $origrole "master"
+ set origgeneration [stat_field $masterenv rep_stat "Generation number"]
+
+ set got_hold_elect(M) 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set got_hold_elect($i) 0
+ set elect_pipe($i) INVALID
+ }
+ set elect_pipe(0) [start_election C0 $qdir $env_cmd(0) \
+ [expr $nclients + 1] $nclients 20 $elect_timeout(default)]
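+	# (Assumed argument order, noted for readability: prefix, message
+	# queue dir, env command, nsites, nvotes, priority, timeout.)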
+
+ tclsleep 2
+
+ set got_master 0
+ while { 1 } {
+ set nproced 0
+ set he 0
+
+ incr nproced [replprocessqueue $masterenv 1 0 he]
+
+ if { $he == 1 } {
+ incr elect_serial
+ set elect_pipe(M) [start_election CM $qdir \
+ $env_cmd(M) [expr $nclients + 1] $nclients \
+ 0 $elect_timeout(default)]
+ set got_hold_elect(M) 1
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he]
+ if { $he == 1 } {
+ # error_check_bad client(0)_in_elect $i 0
+ if { $elect_pipe($i) != "INVALID" } {
+ close_election $elect_pipe($i)
+ }
+ incr elect_serial
+ set pfx CHILD$i.$elect_serial
+ set elect_pipe($i) [start_election $pfx $qdir \
+ $env_cmd($i) [expr $nclients + 1] \
+ $nclients 0 \
+ $elect_timeout(default)]
+ set got_hold_elect($i) 1
+ }
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+ set role [stat_field $masterenv rep_stat "Role"]
+ set generation [stat_field $masterenv rep_stat "Generation number"]
+ error_check_good master_unchanged $origrole $role
+ error_check_good gen_unchanged $origgeneration $generation
+ cleanup_elections
+
+ # We need multiple clients to proceed from here.
+ if { $nclients < 2 } {
+ puts "\tRep$tnum: Skipping for less than two clients."
+ error_check_good masterenv_close [$masterenv close] 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) \
+ [$clientenv($i) close] 0
+ }
+ return
+ }
+
+ # Make sure all the clients are synced up and ready to be good
+ # voting citizens.
+ error_check_good master_flush [$masterenv rep_flush] 0
+ process_msgs $envlist
+
+ # Now hold another election in the first client, this time with
+ # a dead master.
+ puts "\tRep$tnum.e: Starting election with dead master."
+ error_check_good masterenv_close [$masterenv close] 0
+ set envlist [lreplace $envlist 0 0]
+
+ set m "Rep$tnum.e"
+ # We're not going to be using err_cmd, so initialize to "none".
+ # Client #1 has priority 100; everyone else has priority 10.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set err_cmd($i) "none"
+ set crash($i) 0
+ if { $i == 1 } {
+ set pri($i) 100
+ } else {
+ set pri($i) 10
+ }
+ }
+ set nsites $nclients
+ set nvotes $nclients
+ # The elector calls the first election. The expected winner
+ # is $win.
+ set elector 1
+ set win 1
+ run_election env_cmd envlist err_cmd pri crash $qdir $m \
+ $elector $nsites $nvotes $nclients $win 1 "test$tnum.db"
+
+ # Hold an election with two clients at the same (winning) priority.
+ # Make sure that the tie gets broken, and that the third client
+ # does not win.
+ puts "\tRep$tnum.f: Election with two clients at same priority."
+ set m "Rep$tnum.f"
+ # Clients 0 and 1 have high, matching priority.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ if { $i >= 2 } {
+ set pri($i) 10
+ } else {
+ set pri($i) 100
+ }
+ }
+
+ # Run several elections.
+ set elections 5
+ for { set i 0 } { $i < $elections } { incr i } {
+ #
+ # The expected winner is 0 or 1. Since run_election can only
+ # handle one expected winner, catch the result and inspect it.
+ #
+ set elector 0
+ set win 1
+ set altwin 0
+ if {[catch {eval run_election \
+ env_cmd envlist err_cmd pri crash $qdir $m $elector $nsites \
+ $nvotes $nclients $win 1 "test$tnum.db"} res]} {
+ #
+ # If the primary winner didn't win, make sure
+ # the alternative winner won. Do all the cleanup
+ # for that winner normally done in run_election:
+ # open and close the new master, then reopen as a
+ # client for the next cycle.
+ #
+ error_check_good check_winner [is_substr \
+ $res "expected 3, got [expr $altwin + 2]"] 1
+ puts "\t$m: Election $i: Alternate winner $altwin won."
+ error_check_good make_master \
+ [$clientenv($altwin) rep_start -master] 0
+
+ cleanup_elections
+ process_msgs $envlist
+
+ error_check_good newmaster_close \
+ [$clientenv($altwin) close] 0
+ set clientenv($altwin) [eval $env_cmd($altwin)]
+ error_check_good cl($altwin) \
+ [is_valid_env $clientenv($altwin)] TRUE
+ set newelector "$clientenv($altwin) [expr $altwin + 2]"
+ set envlist [lreplace $envlist $altwin $altwin $newelector]
+ } else {
+ puts "\t$m: Election $i: Primary winner $win won."
+ }
+ process_msgs $envlist
+ }
+
+ foreach pair $envlist {
+ set cenv [lindex $pair 0]
+ error_check_good cenv_close [$cenv close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep003.tcl b/db-4.8.30/test/rep003.tcl
new file mode 100644
index 0000000..21a748c
--- /dev/null
+++ b/db-4.8.30/test/rep003.tcl
@@ -0,0 +1,304 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep003
+# TEST Repeated shutdown/restart replication test
+# TEST
+# TEST Run a quick put test in a replicated master environment;
+# TEST start up, shut down, and restart client processes, with
+# TEST and without recovery. To ensure that environment state
+# TEST is transient, use DB_PRIVATE.
+
+proc rep003 { method { tnum "003" } args } {
+ source ./include.tcl
+ global rep003_dbname rep003_omethod rep003_oargs
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Skip for record-based methods.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_record_based $method] != 1 } {
+ lappend test_methods $method
+ }
+ }
+ return $test_methods
+ }
+ if { [is_record_based $method] } {
+ puts "Rep$tnum: Skipping for method $method"
+ return
+ }
+
+ set msg2 "with on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "with in-memory replication files"
+ }
+
+ set rep003_dbname rep003.db
+ set rep003_omethod [convert_method $method]
+ set rep003_oargs [convert_args $method $args]
+
+ # Run the body of the test with and without recovery. If we're
+ # testing in-memory logging, skip the combination of recovery
+ # and in-memory logging -- it doesn't make sense.
+
+ set logsets [create_logsets 2]
+ foreach recopt $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $recopt == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping for\
+ in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $recopt):\
+ Replication repeated-startup test $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep003_sub $method $tnum $l $recopt $args
+ }
+ }
+}
+
+proc rep003_sub { method tnum logset recargs largs } {
+ source ./include.tcl
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync. This test already requires
+ # -txn, so adjust the logargs only.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \
+ -errpfx MASTER $verbargs $repmemargs \
+ -home $masterdir -txn $m_logargs -rep_master \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M) $recargs]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ puts "\tRep$tnum.a: Simple client startup test."
+
+ # Put item one.
+ rep003_put $masterenv A1 a-one
+
+ # Open a client.
+ repladd 2
+ set env_cmd(C) "berkdb_env_noerr -create -private -home $clientdir \
+ -txn $c_logargs -errpfx CLIENT $verbargs $repmemargs \
+ -rep_client -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $env_cmd(C) $recargs]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Put another quick item.
+ rep003_put $masterenv A2 a-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+ replclear 2
+
+ # Now reopen the client after doing another put.
+ puts "\tRep$tnum.b: Client restart."
+ rep003_put $masterenv B1 b-one
+
+ set clientenv [eval $env_cmd(C)]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Loop letting the client and master sync up and get the
+ # environment initialized. It's a new client env so
+ # reinitialize the envlist as well.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+
+ rep003_put $masterenv B2 b-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclear 2
+
+ # Now reopen the client after a recovery.
+ puts "\tRep$tnum.c: Client restart after recovery."
+ rep003_put $masterenv C1 c-one
+
+ set clientenv [eval $env_cmd(C) -recover]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+
+ rep003_put $masterenv C2 c-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv C1 c-one
+ rep003_check $clientenv C2 c-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclear 2
+
+ # Now reopen the client after a catastrophic recovery.
+ puts "\tRep$tnum.d: Client restart after catastrophic recovery."
+ rep003_put $masterenv D1 d-one
+
+ set clientenv [eval $env_cmd(C) -recover_fatal]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+ rep003_put $masterenv D2 d-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+ rep003_check $clientenv C1 c-one
+ rep003_check $clientenv C2 c-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv D1 d-one
+ rep003_check $clientenv D2 d-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ error_check_good masterenv_close [$masterenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep003_put { masterenv key data } {
+ global rep003_dbname rep003_omethod rep003_oargs
+
+ set db [eval {berkdb_open_noerr -create -env $masterenv -auto_commit} \
+ $rep003_omethod $rep003_oargs $rep003_dbname]
+ error_check_good rep3_put_open($key,$data) [is_valid_db $db] TRUE
+
+ set txn [$masterenv txn]
+ error_check_good rep3_put($key,$data) [$db put -txn $txn $key $data] 0
+ error_check_good rep3_put_txn_commit($key,$data) [$txn commit] 0
+
+ error_check_good rep3_put_close($key,$data) [$db close] 0
+}
+
+proc rep003_check { env key data } {
+ global rep003_dbname
+
+ set db [berkdb_open_noerr -rdonly -env $env $rep003_dbname]
+ error_check_good rep3_check_open($key,$data) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ error_check_good rep3_check($key,$data) \
+ [lindex [lindex $dbt 0] 1] $data
+
+ error_check_good rep3_put_close($key,$data) [$db close] 0
+}
diff --git a/db-4.8.30/test/rep005.tcl b/db-4.8.30/test/rep005.tcl
new file mode 100644
index 0000000..e81c775
--- /dev/null
+++ b/db-4.8.30/test/rep005.tcl
@@ -0,0 +1,364 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep005
+# TEST Replication election test with error handling.
+# TEST
+# TEST Run rep_test in a replicated master environment;
+# TEST hold an election among a group of clients to make sure they select
+# TEST a proper master from amongst themselves, forcing errors at various
+# TEST locations in the election path.
+
+proc rep005 { method args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep005: Skipping for method $method."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set tnum "005"
+ set niter 10
+ set nclients 3
+ set logsets [create_logsets [expr $nclients + 1]]
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases."
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ # We don't want to run this with -recover - it takes too
+ # long and doesn't cover any new ground.
+ set recargs ""
+ foreach l $logsets {
+ puts "Rep$tnum ($recargs): Replication election\
+ error test with $nclients clients $msg $msg2."
+ puts -nonewline "Rep$tnum: Started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "Rep$tnum: Client $i logs are\
+ [lindex $l [expr $i + 1]]"
+ }
+ rep005_sub $method $tnum \
+ $niter $nclients $l $recargs $args
+ }
+}
+
+proc rep005_sub { method tnum niter nclients logset recargs largs } {
+ source ./include.tcl
+ global rand_init
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ set m_logtype [lindex $logset 0]
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ set c_logtype($i) [lindex $logset [expr $i + 1]]
+ set c_logargs($i) [adjust_logargs $c_logtype($i)]
+ set c_txnargs($i) [adjust_txnargs $c_logtype($i)]
+ }
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \
+ -event rep_event $repmemargs \
+ -home $masterdir $m_logargs -errpfx MASTER $verbargs \
+ $m_txnargs -rep_master -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M) $recargs]
+
+ set envlist {}
+ lappend envlist "$masterenv 1"
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env_noerr -create \
+ -event rep_event $repmemargs \
+ -home $clientdir($i) $c_logargs($i) \
+ $c_txnargs($i) -rep_client $verbargs \
+ -errpfx CLIENT$i \
+ -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i) $recargs]
+ lappend envlist "$clientenv($i) $envid"
+ }
+
+ # Process startup messages
+ process_msgs $envlist
+ # Run rep_test in the master.
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+
+ # Process all the messages and close the master.
+ process_msgs $envlist
+
+ # Check that databases are in-memory or on-disk as expected.
+ check_db_location $masterenv
+ for { set i 0 } { $i < $nclients } { incr i } {
+ check_db_location $clientenv($i)
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+ set envlist [lreplace $envlist 0 0]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ replclear [expr $i + 2]
+ }
+
+	# We set up the error list for each client.  We know that the
+	# first client is the one calling the election; therefore, we add
+	# the error location on sending the message (electsend) for that one.
+ set m "Rep$tnum"
+ set count 0
+ set win -1
+ #
+ set c0err { none electinit }
+ set c1err $c0err
+ set c2err $c0err
+ set numtests [expr [llength $c0err] * [llength $c1err] * \
+ [llength $c2err]]
+ puts "\t$m.b: Starting $numtests election with error tests"
+ set last_win -1
+ set win -1
+ foreach c0 $c0err {
+ foreach c1 $c1err {
+ foreach c2 $c2err {
+ set elist [list $c0 $c1 $c2]
+ rep005_elect env_cmd envlist $qdir \
+ $m $count win last_win $elist $logset
+ incr count
+ }
+ }
+ }
+
+ foreach pair $envlist {
+ set cenv [lindex $pair 0]
+ error_check_good cenv_close [$cenv close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+ puts -nonewline \
+ "Rep$tnum: Completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+}
+
+proc rep005_elect { ecmd celist qdir msg count \
+ winner lsn_lose elist logset} {
+ global elect_timeout elect_serial
+ global timeout_ok
+ global databases_in_memory
+ upvar $ecmd env_cmd
+ upvar $celist envlist
+ upvar $winner win
+ upvar $lsn_lose last_win
+
+ # Set the proper value for the first time through the
+ # loop. On subsequent passes, timeout_ok will already
+ # be set.
+ if { [info exists timeout_ok] == 0 } {
+ set timeout_ok 0
+ }
+
+ set nclients [llength $elist]
+ set nsites [expr $nclients + 1]
+
+ set cl_list {}
+ foreach pair $envlist {
+ set id [lindex $pair 1]
+ set i [expr $id - 2]
+ set clientenv($i) [lindex $pair 0]
+ set err_cmd($i) [lindex $elist $i]
+ set elect_pipe($i) INVALID
+ replclear $id
+ lappend cl_list $i
+ }
+
+	# Select winner.  We want to test biggest LSN wins, and secondarily
+	# highest priority wins.  If we already have a master, make sure
+	# we don't start the election in that master's site.
+ set el 0
+ if { $win == -1 } {
+ if { $last_win != -1 } {
+ set cl_list [lreplace $cl_list $last_win $last_win]
+ set el $last_win
+ }
+ set windex [berkdb random_int 0 [expr [llength $cl_list] - 1]]
+ set win [lindex $cl_list $windex]
+ } else {
+ # Easy case, if we have a master, the winner must be the
+ # same one as last time, just use $win.
+ # If client0 is the current existing master, start the
+ # election in client 1.
+ if {$win == 0} {
+ set el 1
+ }
+ }
+	# Winner has priority 100.  If we are testing LSN winning, make
+	# sure the lowest-LSN client has the highest priority.
+	# Everyone else has priority 10.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set crash($i) 0
+ if { $i == $win } {
+ set pri($i) 100
+ } elseif { $i == $last_win } {
+ set pri($i) 200
+ } else {
+ set pri($i) 10
+ }
+ }
+
+ puts "\t$msg.b.$count: Start election (win=client$win) $elist"
+ set msg $msg.c.$count
+ set nsites $nclients
+ set nvotes $nsites
+ if { $databases_in_memory } {
+ set dbname { "" test.db }
+ } else {
+ set dbname test.db
+ }
+ run_election env_cmd envlist err_cmd pri crash \
+ $qdir $msg $el $nsites $nvotes $nclients $win \
+ 0 $dbname 0 $timeout_ok
+
+ #
+ # Sometimes test elections with an existing master.
+ # Other times test elections without master by closing the
+ # master we just elected and creating a new client.
+ # We want to weight it to close the new master. So, use
+ # a list to cause closing about 70% of the time.
+ #
+ set close_list { 0 0 0 1 1 1 1 1 1 1}
+ set close_len [expr [llength $close_list] - 1]
+ set close_index [berkdb random_int 0 $close_len]
+
+ # Unless we close the master, the next election will time out.
+ set timeout_ok 1
+
+ if { [lindex $close_list $close_index] == 1 } {
+ # Declare that we expect the next election to succeed.
+ set timeout_ok 0
+ puts -nonewline "\t\t$msg: Closing "
+ error_check_good log_flush [$clientenv($win) log_flush] 0
+ error_check_good newmaster_close [$clientenv($win) close] 0
+ #
+ # If the next test should win via LSN then remove the
+ # env before starting the new client so that we
+ # can guarantee this client doesn't win the next one.
+ set lsn_win { 0 0 0 0 1 1 1 1 1 1 }
+ set lsn_len [expr [llength $lsn_win] - 1]
+ set lsn_index [berkdb random_int 0 $lsn_len]
+ set rec_arg ""
+ set win_inmem [expr [string compare [lindex $logset \
+ [expr $win + 1]] in-memory] == 0]
+ if { [lindex $lsn_win $lsn_index] == 1 } {
+ set last_win $win
+ set dirindex [lsearch -exact $env_cmd($win) "-home"]
+ incr dirindex
+ set lsn_dir [lindex $env_cmd($win) $dirindex]
+ env_cleanup $lsn_dir
+ puts -nonewline "and cleaning "
+ } else {
+ #
+ # If we're not cleaning the env, decide if we should
+ # run recovery upon reopening the env. This causes
+ # two things:
+ # 1. Removal of region files which forces the env
+ # to read its __db.rep.egen file.
+ # 2. Adding a couple log records, so this client must
+ # be the next winner as well since it'll have the
+ # biggest LSN.
+ #
+ set rec_win { 0 0 0 0 0 0 1 1 1 1 }
+ set rec_len [expr [llength $rec_win] - 1]
+ set rec_index [berkdb random_int 0 $rec_len]
+ if { [lindex $rec_win $rec_index] == 1 } {
+ puts -nonewline "and recovering "
+ set rec_arg "-recover"
+ #
+ # If we're in memory and about to run
+ # recovery, we force ourselves not to win
+ # the next election because recovery will
+ # blow away the entire log in memory.
+ # However, we don't skip this entirely
+ # because we still want to force reading
+ # of __db.rep.egen.
+ #
+ if { $win_inmem } {
+ set last_win $win
+ } else {
+ set last_win -1
+ }
+ } else {
+ set last_win -1
+ }
+ }
+ puts "new master, new client $win"
+ set clientenv($win) [eval $env_cmd($win) $rec_arg]
+ error_check_good cl($win) [is_valid_env $clientenv($win)] TRUE
+ #
+ # Since we started a new client, we need to replace it
+ # in the message processing list so that we get the
+ # new Tcl handle name in there.
+ set newel "$clientenv($win) [expr $win + 2]"
+ set envlist [lreplace $envlist $win $win $newel]
+ if { $rec_arg == "" || $win_inmem } {
+ set win -1
+ }
+ #
+ # Since we started a new client we want to give them
+ # all a chance to process everything outstanding before
+ # the election on the next iteration.
+ #
+ process_msgs $envlist
+ }
+}
diff --git a/db-4.8.30/test/rep006.tcl b/db-4.8.30/test/rep006.tcl
new file mode 100644
index 0000000..6550aef
--- /dev/null
+++ b/db-4.8.30/test/rep006.tcl
@@ -0,0 +1,213 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep006
+# TEST Replication and non-rep env handles.
+# TEST
+# TEST Run a modified version of test001 in a replicated master
+# TEST environment; verify that the database on the client is correct.
+# TEST Next, create a non-rep env handle to the master env.
+# TEST Attempt to open the database r/w to force error.
+
+proc rep006 { method { niter 1000 } { tnum "006" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+ set logsets [create_logsets 2]
+
+ # All access methods are allowed.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping for in-memory logs\
+ with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r): Replication and\
+ non-rep env handles $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep006_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep006_sub { method niter tnum logset recargs largs } {
+ source ./include.tcl
+ global testdir
+ global is_hp_test
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ } else {
+ set checkfunc test001.check
+ }
+
+ # Open a master.
+ repladd 1
+ set max_locks 2500
+ set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \
+ -lock_max_objects $max_locks -lock_max_locks $max_locks \
+ -home $masterdir -errpfx MASTER $verbargs $repmemargs \
+ $m_txnargs $m_logargs -rep_master -rep_transport \
+ \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M) $recargs]
+
+ # Open a client
+ repladd 2
+ set env_cmd(C) "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ -lock_max_objects $max_locks -lock_max_locks $max_locks \
+ -home $clientdir -errpfx CLIENT $verbargs $repmemargs \
+ -rep_client -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $env_cmd(C) $recargs]
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Run a modified test001 in the master (and update client).
+ puts "\tRep$tnum.a: Running test001 in replicated env."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ # Check that databases are in-memory or on-disk as expected.
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+ check_db_location $masterenv
+ check_db_location $clientenv
+
+ # Verify the database in the client dir.
+ puts "\tRep$tnum.b: Verifying client database contents."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file $dbname $clientenv $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ # Determine whether this build is configured with --enable-debug_rop
+ # or --enable-debug_wop; we'll need to skip portions of the test if so.
+ # Also check for *not* configuring with diagnostic. That similarly
+ # forces a different code path and we need to skip portions.
+ set conf [berkdb getconfig]
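+	# (Illustrative) berkdb getconfig returns the list of options this
+	# build was configured with; the checks below look for "debug_rop",
+	# "debug_wop" and "diagnostic" in that list.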
+ set skip_for_config 0
+ if { [is_substr $conf "debug_rop"] == 1 \
+ || [is_substr $conf "debug_wop"] == 1 \
+ || [is_substr $conf "diagnostic"] == 0 } {
+ set skip_for_config 1
+ }
+
+ # Skip if configured with --enable-debug_rop or --enable-debug_wop
+ # or without --enable-diagnostic,
+ # because the checkpoint won't fail in those cases.
+ if { $skip_for_config == 1 } {
+ puts "\tRep$tnum.c: Skipping based on configuration."
+ } else {
+ puts "\tRep$tnum.c: Verifying non-master db_checkpoint."
+ set stat \
+ [catch {exec $util_path/db_checkpoint -h $masterdir -1} ret]
+ error_check_good open_err $stat 1
+ error_check_good \
+ open_err1 [is_substr $ret "attempting to modify"] 1
+ }
+
+ # We have to skip this bit for HP-UX because we can't open an env
+ # twice, and for debug_rop/debug_wop because the open won't fail.
+ if { $is_hp_test == 1 } {
+ puts "\tRep$tnum.d: Skipping for HP-UX."
+ } elseif { $skip_for_config == 1 } {
+ puts "\tRep$tnum.d: Skipping based on configuration."
+ } else {
+ puts "\tRep$tnum.d: Verifying non-master access."
+
+ set rdenv [eval {berkdb_env_noerr} \
+ -home $masterdir $verbargs]
+ error_check_good rdenv [is_valid_env $rdenv] TRUE
+ #
+ # Open the db read/write which will cause it to try to
+ # write out a log record, which should fail.
+ #
+ set stat \
+ [catch {berkdb_open_noerr -env $rdenv $dbname} ret]
+ error_check_good open_err $stat 1
+ error_check_good \
+ open_err1 [is_substr $ret "attempting to modify"] 1
+ error_check_good rdenv_close [$rdenv close] 0
+ }
+
+ process_msgs $envlist
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+
+ error_check_good verify \
+ [verify_dir $clientdir "\tRep$tnum.e: " 0 0 1] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep007.tcl b/db-4.8.30/test/rep007.tcl
new file mode 100644
index 0000000..5bd1f96
--- /dev/null
+++ b/db-4.8.30/test/rep007.tcl
@@ -0,0 +1,265 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep007
+# TEST Replication and bad LSNs
+# TEST
+# TEST Run rep_test in a replicated master env.
+# TEST Close the client. Make additional changes to master.
+# TEST Close the master. Open the client as the new master.
+# TEST Make several different changes. Open the old master as
+# TEST the client. Verify periodically that contents are correct.
+# TEST This test is not appropriate for named in-memory db testing
+# TEST because the databases are lost when both envs are closed.
+proc rep007 { method { niter 10 } { tnum "007" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # All access methods are allowed.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 3]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping for\
+ in-memory logs with -recover."
+ continue
+ }
+ if { $r == "-recover" && $databases_in_memory } {
+ puts "Rep$tnum: Skipping for\
+ named in-memory databases with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r):\
+ Replication and bad LSNs $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client1 logs are [lindex $l 1]"
+ puts "Rep$tnum: Client2 logs are [lindex $l 2]"
+ rep007_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep007_sub { method niter tnum logset recargs largs } {
+ global testdir
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set omethod [convert_method $method]
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR.2
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ set m_logtype [lindex $logset 0]
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+
+ set c_logtype [lindex $logset 1]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ set c2_logtype [lindex $logset 2]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \
+ -home $masterdir $verbargs -errpfx MASTER $repmemargs \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open two clients
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ -home $clientdir $verbargs -errpfx CLIENT1 $repmemargs \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ repladd 3
+ set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs $c2_logargs \
+ -home $clientdir2 $verbargs -errpfx CLIENT2 $repmemargs \
+ -rep_transport \[list 3 replsend\]"
+ set cl2env [eval $cl2_envcmd $recargs -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2} {$cl2env 3}"
+ process_msgs $envlist
+
+ # Run rep_test in the master (and update clients).
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ # Databases should now have identical contents.
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+
+ rep_verify $masterdir $masterenv $clientdir $clientenv 0 1 1
+ rep_verify $masterdir $masterenv $clientdir2 $cl2env 0 1 1
+
+ puts "\tRep$tnum.b: Close client 1 and make master changes."
+ # Flush the log so that we don't lose any changes, since we'll be
+ # relying on having a good log when we reopen this environment and
+ # run recovery on it later.
+ #
+ $clientenv log_flush
+ error_check_good client_close [$clientenv close] 0
+
+ # Change master and propagate changes to client 2.
+ set start $niter
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ set envlist "{$masterenv 1} {$cl2env 3}"
+ process_msgs $envlist
+
+ # We need to do a deletion here to cause meta-page updates,
+ # particularly for queue. Delete the first pair and remember
+ # what it is -- it should come back after the master is closed
+ # and reopened as a client.
+ set db1 [eval {berkdb_open_noerr} -env $masterenv -auto_commit $dbname]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+ set txn [$masterenv txn]
+ set c [eval $db1 cursor -txn $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db1] TRUE
+ set first [$c get -first]
+ set pair [lindex $first 0]
+ set key [lindex $pair 0]
+ set data [lindex $pair 1]
+
+ error_check_good cursor_del [$c del] 0
+ error_check_good dbcclose [$c close] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db1_close [$db1 close] 0
+ #
+ # Process the messages to get them out of the db. This also
+ # propagates the delete to client 2.
+ #
+ process_msgs $envlist
+
+ # Nuke those for closed client
+ replclear 2
+
+ # Databases 1 and 3 should now have identical contents.
+ # Database 2 should be different. First check 1 and 3. We
+ # have to wait to check 2 until the env is open again.
+ rep_verify $masterdir $masterenv $clientdir2 $cl2env 0 1 1
+
+ puts "\tRep$tnum.c: Close master, reopen client as master."
+ $masterenv log_flush
+ error_check_good master_close [$masterenv close] 0
+ set newmasterenv [eval $cl_envcmd $recargs -rep_master]
+
+ # Now we can check that database 2 does not match 3.
+ rep_verify $clientdir $newmasterenv $clientdir2 $cl2env 0 0 0
+
+ puts "\tRep$tnum.d: Make incompatible changes to new master."
+ set envlist "{$newmasterenv 2} {$cl2env 3}"
+ process_msgs $envlist
+
+ set db [eval {berkdb_open_noerr} \
+ -env $newmasterenv -auto_commit -create $omethod $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set t [$newmasterenv txn]
+
+ # Force in a pair {10 10}. This works for all access
+ # methods and won't overwrite the old first pair for record-based.
+ set ret [eval {$db put} -txn $t 10 [chop_data $method 10]]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+ error_check_good dbclose [$db close] 0
+
+ eval rep_test $method $newmasterenv NULL $niter $start $start 0 $largs
+ set envlist "{$newmasterenv 2} {$cl2env 3}"
+ process_msgs $envlist
+
+ # Nuke those for closed old master
+ replclear 1
+
+ # Databases 2 and 3 should now match.
+ rep_verify $clientdir $newmasterenv $clientdir2 $cl2env 0 1 1
+
+ puts "\tRep$tnum.e: Open old master as client."
+ set newclientenv [eval $ma_envcmd -rep_client -recover]
+ set envlist "{$newclientenv 1} {$newmasterenv 2} {$cl2env 3}"
+ process_msgs $envlist
+
+ # The pair we deleted earlier from the master should now
+ # have reappeared.
+ set db1 [eval {berkdb_open_noerr}\
+ -env $newclientenv -auto_commit $dbname]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+ set ret [$db1 get -get_both $key [pad_data $method $data]]
+ error_check_good get_both $ret [list $pair]
+ error_check_good db1_close [$db1 close] 0
+
+ set start [expr $niter * 2]
+ eval rep_test $method $newmasterenv NULL $niter $start $start 0 $largs
+ set envlist "{$newclientenv 1} {$newmasterenv 2} {$cl2env 3}"
+ process_msgs $envlist
+
+ # Now all 3 should match again.
+ rep_verify $masterdir $newclientenv $clientdir $newmasterenv 0 1 1
+ rep_verify $masterdir $newclientenv $clientdir2 $cl2env 0 1 1
+
+ error_check_good newmasterenv_close [$newmasterenv close] 0
+ error_check_good newclientenv_close [$newclientenv close] 0
+ error_check_good cl2_close [$cl2env close] 0
+ replclose $testdir/MSGQUEUEDIR
+ return
+}
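
The heart of rep007 is the role swap: flush and close the old master, promote a client, and later bring the old environment back as a client with -recover so its extra log records are rolled back during synchronization. A minimal sketch of that sequence, assuming the env command strings and helpers (is_valid_env, error_check_good) defined above; the proc name is a placeholder:

    proc rep007_swap_sketch { masterenv clientenv ma_envcmd } {
        # Make the retiring master's log durable, then close it.
        $masterenv log_flush
        error_check_good old_master_close [$masterenv close] 0

        # Promote the surviving client.
        error_check_good promote [$clientenv rep_start -master] 0

        # Reopen the old home as a client, running recovery; its
        # unreplicated transactions are rolled back when it syncs.
        set newclient [eval $ma_envcmd -rep_client -recover]
        error_check_good newclient_open [is_valid_env $newclient] TRUE
        return $newclient
    }
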
diff --git a/db-4.8.30/test/rep008.tcl b/db-4.8.30/test/rep008.tcl
new file mode 100644
index 0000000..76cc913
--- /dev/null
+++ b/db-4.8.30/test/rep008.tcl
@@ -0,0 +1,146 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep008
+# TEST Replication, back up and synchronizing
+# TEST
+# TEST Run a modified version of test001 in a replicated master
+# TEST environment.
+# TEST Close master and client.
+# TEST Copy the master log to the client.
+# TEST Clean the master.
+# TEST Reopen the master and client.
+proc rep008 { method { niter 10 } { tnum "008" } args } {
+
+ source ./include.tcl
+ global mixed_mode_logging
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for btree only.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: Skipping for method $method."
+ return
+ }
+
+ # This test depends on copying logs, so can't be run with
+ # in-memory logging.
+ if { $mixed_mode_logging > 0 } {
+ puts "Rep$tnum: Skipping for mixed-mode logging."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set args [convert_args $method $args]
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ puts "Rep$tnum ($method $r):\
+ Replication backup and synchronizing $msg2."
+ rep008_sub $method $niter $tnum $r $args
+ }
+}
+
+proc rep008_sub { method niter tnum recargs largs } {
+ global testdir
+ global util_path
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create -txn nosync $verbargs \
+ -home $masterdir -errpfx MASTER $repmemargs \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create -txn nosync $verbargs \
+ -home $clientdir -errpfx CLIENT $repmemargs \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Run a modified test001 in the master (and update client).
+ puts "\tRep$tnum.a: Running test001 in replicated env."
+ eval test001 $method $niter 0 0 $tnum -env $masterenv $largs
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Close client and master. Copy logs."
+ error_check_good client_close [$clientenv close] 0
+ error_check_good master_close [$masterenv close] 0
+ file copy -force $masterdir/log.0000000001 $testdir/log.save
+
+ puts "\tRep$tnum.c: Clean master and reopen"
+ #
+ # Add sleep calls to ensure master's new log doesn't match
+ # its old one in the ckp timestamp.
+ #
+ tclsleep 1
+ env_cleanup $masterdir
+ tclsleep 1
+ env_cleanup $clientdir
+ file copy -force $testdir/log.save $clientdir/log.0000000001
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ #
+ # We'll only catch this error if we turn on no-autoinit.
+ # Otherwise, the system will throw away everything on the
+ # client and resync.
+ #
+ $clientenv rep_config {noautoinit on}
+
+ # Process the messages to get them out of the db.
+ #
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist 0 NONE err
+ error_check_bad err $err 0
+ error_check_good errchk [is_substr $err "DB_REP_JOIN_FAILURE"] 1
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
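
What rep008 really exercises is the noautoinit switch: with it on, a client whose log no longer matches the master's reports DB_REP_JOIN_FAILURE instead of silently discarding its state and re-syncing. A condensed sketch of that check, assuming open master/client handles and the suite's process_msgs/is_substr helpers:

    # Refuse automatic internal init on the client.
    $clientenv rep_config {noautoinit on}

    # Pump messages, capturing the error instead of failing outright.
    process_msgs "{$masterenv 1} {$clientenv 2}" 0 NONE err
    error_check_bad join_err $err 0
    error_check_good join_failure [is_substr $err "DB_REP_JOIN_FAILURE"] 1
    # With {noautoinit off} (the default) the same message exchange would
    # instead trigger internal init and the client would simply re-sync.
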
diff --git a/db-4.8.30/test/rep009.tcl b/db-4.8.30/test/rep009.tcl
new file mode 100644
index 0000000..567b02c
--- /dev/null
+++ b/db-4.8.30/test/rep009.tcl
@@ -0,0 +1,198 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep009
+# TEST Replication and DUPMASTERs
+# TEST Run test001 in a replicated environment.
+# TEST
+# TEST Declare one of the clients to also be a master.
+# TEST Close a client, clean it and then declare it a 2nd master.
+proc rep009 { method { niter 10 } { tnum "009" } args } {
+
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for btree only.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep009: Skipping for method $method."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set logsets [create_logsets 3]
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($r): Replication DUPMASTER test $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client1 logs are [lindex $l 1]"
+ puts "Rep$tnum: Client2 logs are [lindex $l 2]"
+ rep009_sub $method $niter $tnum 0 $l $r $args
+ rep009_sub $method $niter $tnum 1 $l $r $args
+ }
+ }
+}
+
+proc rep009_sub { method niter tnum clean logset recargs largs } {
+ global testdir
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR.2
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ set m_logtype [lindex $logset 0]
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+
+ set c_logtype [lindex $logset 1]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ set c2_logtype [lindex $logset 2]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \
+ -home $masterdir $verbargs -errpfx MASTER $repmemargs \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client.
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ -home $clientdir $verbargs -errpfx CLIENT1 $repmemargs \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Open a second client.
+ repladd 3
+ set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs $c2_logargs \
+ -home $clientdir2 $verbargs -errpfx CLIENT2 $repmemargs \
+ -rep_transport \[list 3 replsend\]"
+ set cl2env [eval $cl2_envcmd $recargs -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2} {$cl2env 3}"
+ process_msgs $envlist
+
+ # Run a modified test001 in the master (and update client).
+ puts "\tRep$tnum.a: Running test001 in replicated env."
+ eval test001 $method $niter 0 0 $tnum -env $masterenv $largs
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Declare a client to be a master."
+ if { $clean } {
+ error_check_good clientenv_close [$clientenv close] 0
+ env_cleanup $clientdir
+ set clientenv [eval $cl_envcmd $recargs -rep_master]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ } else {
+ error_check_good client_master [$clientenv rep_start -master] 0
+ }
+
+ #
+ # Process the messages to get them out of the db.
+ #
+ for { set i 1 } { $i <= 3 } { incr i } {
+ set seen_dup($i) 0
+ }
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue \
+ $masterenv 1 0 NONE dup1 err1]
+ incr nproced [replprocessqueue \
+ $clientenv 2 0 NONE dup2 err2]
+ incr nproced [replprocessqueue \
+ $cl2env 3 0 NONE dup3 err3]
+ if { $dup1 != 0 } {
+ set seen_dup(1) 1
+ error_check_good downgrade1 \
+ [$masterenv rep_start -client] 0
+ }
+ if { $dup2 != 0 } {
+ set seen_dup(2) 1
+ error_check_good downgrade2 \
+ [$clientenv rep_start -client] 0
+ }
+ #
+ # We might get errors after downgrading as the former
+ # masters might get old messages from other clients.
+ # If we get an error, make sure it came after the downgrade.
+ if { $err1 != 0 } {
+ error_check_good seen_dup1_err $seen_dup(1) 1
+ error_check_good err1str [is_substr \
+ $err1 "invalid argument"] 1
+ }
+ if { $err2 != 0 } {
+ error_check_good seen_dup2_err $seen_dup(2) 1
+ error_check_good err2str [is_substr \
+ $err2 "invalid argument"] 1
+ }
+ #
+ # This should never happen. We'll check below.
+ #
+ if { $dup3 != 0 } {
+ set seen_dup(3) 1
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+ error_check_good seen_dup1 $seen_dup(1) 1
+ error_check_good seen_dup2 $seen_dup(2) 1
+ error_check_bad seen_dup3 $seen_dup(3) 1
+
+ puts "\tRep$tnum.c: Close environments"
+ error_check_good master_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ error_check_good cl2_close [$cl2env close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
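
The loop in rep009 reduces to one rule per site: pump that site's queue and, if the dup-master flag comes back set, step the site down. A sketch of that per-site step, using the suite's replprocessqueue; the proc name and variable names are placeholders:

    proc rep009_pump_site_sketch { env eid } {
        # dup and err are filled in by replprocessqueue via upvar.
        set nproced [replprocessqueue $env $eid 0 NONE dup err]
        if { $dup != 0 } {
            # Two masters saw each other; downgrade this one.
            error_check_good downgrade [$env rep_start -client] 0
        }
        return $nproced
    }
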
diff --git a/db-4.8.30/test/rep010.tcl b/db-4.8.30/test/rep010.tcl
new file mode 100644
index 0000000..a38de0a
--- /dev/null
+++ b/db-4.8.30/test/rep010.tcl
@@ -0,0 +1,266 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep010
+# TEST Replication and ISPERM
+# TEST
+# TEST With consecutive message processing, make sure every
+# TEST DB_REP_PERMANENT is responded to with an ISPERM when
+# TEST processed. With gaps in the processing, make sure
+# TEST every DB_REP_PERMANENT is responded to with an ISPERM
+# TEST or a NOTPERM. Verify in both cases that the LSN returned
+# TEST with ISPERM is found in the log.
+proc rep010 { method { niter 100 } { tnum "010" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set msg "with on-disk databases"
+ if { $databases_in_memory } {
+ set msg "with named in-memory databases"
+ if { [is_queueext $method] == 1 } {
+ puts "Skipping rep$tnum for method $method"
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r): Replication and ISPERM"
+ puts "Rep$tnum: with $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep010_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep010_sub { method niter tnum logset recargs largs } {
+ source ./include.tcl
+ global rand_init
+ berkdb srand $rand_init
+ global perm_sent_list
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set omethod [convert_method $method]
+
+ replsetup $testdir/MSGQUEUEDIR
+ set perm_sent_list {{}}
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \
+ $m_logargs $verbargs -errpfx MASTER $repmemargs \
+ -home $masterdir $m_txnargs -rep_master \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M) $recargs]
+
+ # Open a client
+ repladd 2
+ set env_cmd(C) "berkdb_env_noerr -create -home $clientdir $repmemargs \
+ $c_txnargs $c_logargs $verbargs -rep_client -errpfx CLIENT \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $env_cmd(C) $recargs]
+
+ # Bring the client online. Since that now involves internal init, we
+ # have to avoid the special rep010_process_msgs here, because otherwise
+ # we would hang trying to open a log cursor.
+ #
+ process_msgs "{$masterenv 1} {$clientenv 2}"
+
+ # Open database in master, propagate to client.
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname test.db
+ }
+ set db1 [eval {berkdb_open_noerr -create} $omethod -auto_commit \
+ -env $masterenv $largs $dbname]
+ rep010_process_msgs $masterenv $clientenv 1
+
+ puts "\tRep$tnum.a: Process messages with no gaps."
+ # Feed operations one at a time to master and immediately
+ # update client.
+ for { set i 1 } { $i <= $niter } { incr i } {
+ set t [$masterenv txn]
+ error_check_good db_put \
+ [eval $db1 put -txn $t $i [chop_data $method data$i]] 0
+ error_check_good txn_commit [$t commit] 0
+ rep010_process_msgs $masterenv $clientenv 1
+ }
+
+ # Replace data.
+ for { set i 1 } { $i <= $niter } { incr i } {
+ set t [$masterenv txn]
+ set ret \
+ [$db1 get -get_both -txn $t $i [pad_data $method data$i]]
+ error_check_good db_put \
+ [$db1 put -txn $t $i [chop_data $method newdata$i]] 0
+ error_check_good txn_commit [$t commit] 0
+ rep010_process_msgs $masterenv $clientenv 1
+ }
+
+ # Try some aborts. These do not write permanent messages.
+ for { set i 1 } { $i <= $niter } { incr i } {
+ set t [$masterenv txn]
+ error_check_good db_put [$db1 put -txn $t $i abort$i] 0
+ error_check_good txn_abort [$t abort] 0
+ rep010_process_msgs $masterenv $clientenv 0
+ }
+
+ puts "\tRep$tnum.b: Process messages with gaps."
+ # To test gaps in message processing, run and commit a whole
+ # bunch of transactions, then process the messages with skips.
+ for { set i 1 } { $i <= $niter } { incr i } {
+ set t [$masterenv txn]
+ error_check_good db_put [$db1 put -txn $t $i data$i] 0
+ error_check_good txn_commit [$t commit] 0
+ }
+ set skip [berkdb random_int 2 8]
+ rep010_process_msgs $masterenv $clientenv 1 $skip
+
+ check_db_location $masterenv
+ check_db_location $clientenv
+
+ # Clean up.
+ error_check_good db1_close [$db1 close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep010_process_msgs { masterenv clientenv check {skip_interval 0} } {
+ global perm_response_list
+ global perm_sent_list
+
+ set perm_response_list {{}}
+
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1 $skip_interval]
+ incr nproced [replprocessqueue $clientenv 2 $skip_interval]
+
+ # In this test, the ISPERM and NOTPERM messages are
+ # sent by the client back to the master. Verify that we
+ # get ISPERM when the client is caught up to the master
+ # (i.e. last client LSN in the log matches the LSN returned
+ # with the ISPERM), and that when we get NOTPERM, the client
+ # is not caught up.
+
+ # Create a list of the LSNs in the client log.
+ set lsnlist {}
+ set logc [$clientenv log_cursor]
+ error_check_good logc \
+ [is_valid_logc $logc $clientenv] TRUE
+ for { set logrec [$logc get -first] } \
+ { [llength $logrec] != 0 } \
+ { set logrec [$logc get -next] } {
+ lappend lsnlist [lindex [lindex $logrec 0] 1]
+ }
+ set lastloglsn [lindex $lsnlist end]
+
+ # Parse perm_response_list to find the LSN returned with
+ # ISPERM or NOTPERM.
+ set response [lindex $perm_response_list end]
+ set permtype [lindex $response 0]
+ set messagelsn [lindex [lindex $response 1] 1]
+
+ if { [llength $response] != 0 } {
+ if { $permtype == "NOTPERM" } {
+ # If we got a NOTPERM, the returned LSN has to
+ # be greater than the last LSN in the log.
+ error_check_good notpermlsn \
+ [expr $messagelsn > $lastloglsn] 1
+ } elseif { $permtype == "ISPERM" } {
+ # If we got an ISPERM, the returned LSN has to
+ # be in the log.
+ error_check_bad \
+ ispermlsn [lsearch $lsnlist $messagelsn] -1
+ } else {
+ puts "FAIL: unexpected message type $permtype"
+ }
+ }
+
+ error_check_good logc_close [$logc close] 0
+
+ # If we've finished processing all the messages, check
+ # that the last received permanent message LSN matches the
+ # last sent permanent message LSN.
+ if { $nproced == 0 } {
+ if { $check != 0 } {
+ set last_sent [lindex $perm_sent_list end]
+ set last_rec_msg \
+ [lindex $perm_response_list end]
+ set last_received [lindex $last_rec_msg 1]
+ error_check_good last_message \
+ $last_sent $last_received
+ }
+
+ # If the check passed, empty out the lists.
+ set perm_response_list {{}}
+ set perm_sent_list {{}}
+ break
+ }
+ }
+}
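
The invariant rep010_process_msgs enforces is worth stating on its own: an LSN returned with ISPERM must already be present in the client's log, while an LSN returned with NOTPERM must lie beyond the last LSN written there. A sketch of just that check, assuming lsnlist and response are built as in the proc above; the proc name is a placeholder:

    proc rep010_check_perm_sketch { lsnlist response } {
        set permtype [lindex $response 0]
        set messagelsn [lindex [lindex $response 1] 1]
        set lastloglsn [lindex $lsnlist end]
        if { $permtype == "ISPERM" } {
            # The acknowledged LSN must be in the client log.
            error_check_bad isperm_lsn [lsearch $lsnlist $messagelsn] -1
        } elseif { $permtype == "NOTPERM" } {
            # A not-yet-permanent LSN must be past the end of the log.
            error_check_good notperm_lsn \
                [expr $messagelsn > $lastloglsn] 1
        }
    }
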
diff --git a/db-4.8.30/test/rep011.tcl b/db-4.8.30/test/rep011.tcl
new file mode 100644
index 0000000..6f8e2ab
--- /dev/null
+++ b/db-4.8.30/test/rep011.tcl
@@ -0,0 +1,195 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep011
+# TEST Replication: test open handle across an upgrade.
+# TEST
+# TEST Open and close test database in master environment.
+# TEST Update the client. Check the client, and leave its database
+# TEST handle open as we close the master env and upgrade the
+# TEST client to master. Reopen the old master as a client
+# TEST and catch up. Test that we can still do a put to the
+# TEST handle we created on the new master back when it was
+# TEST still a client, and then make sure that the change is
+# TEST propagated back to the new client.
+
+proc rep011 { method { tnum "011" } args } {
+ global has_crypto
+ global passwd
+ global repfiles_in_memory
+
+ source ./include.tcl
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set logsets [create_logsets 2]
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ set envargs ""
+ puts "Rep$tnum.a ($r $envargs $method):\
+ Test upgrade of open handles $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep011_sub $method $tnum $envargs $l $r $args
+
+ if { $has_crypto == 0 } {
+ continue
+ }
+ append envargs " -encryptaes $passwd "
+ append args " -encrypt "
+
+ puts "Rep$tnum.b ($r $envargs):\
+ Open handle upgrade test with encryption ($method)."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep011_sub $method $tnum $envargs $l $r $args
+ }
+ }
+}
+
+proc rep011_sub { method tnum envargs logset recargs largs } {
+ source ./include.tcl
+ global testdir
+ global encrypt
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \
+ $m_logargs $envargs $verbargs -home $masterdir $repmemargs \
+ $m_txnargs -errpfx MASTER -rep_master -rep_transport \
+ \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M) $recargs]
+
+ # Open a client
+ repladd 2
+ set env_cmd(C) "berkdb_env_noerr -create \
+ $c_logargs $envargs $verbargs -home $clientdir $repmemargs \
+ $c_txnargs -errpfx CLIENT -rep_client -rep_transport \
+ \[list 2 replsend\]"
+ set clientenv [eval $env_cmd(C) $recargs]
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Open a test database on the master so we can test having
+ # handles open across an upgrade.
+ puts "\tRep$tnum.a:\
+ Opening test database for post-upgrade client logging test."
+ set master_upg_db [berkdb_open_noerr \
+ -create -auto_commit -btree -env $masterenv rep$tnum-upg.db]
+ set puttxn [$masterenv txn]
+ error_check_good master_upg_db_put \
+ [$master_upg_db put -txn $puttxn hello world] 0
+ error_check_good puttxn_commit [$puttxn commit] 0
+ error_check_good master_upg_db_close [$master_upg_db close] 0
+
+ # Update the client.
+ process_msgs $envlist
+
+ # Open the cross-upgrade database on the client and check its contents.
+ set client_upg_db [berkdb_open_noerr \
+ -create -auto_commit -btree -env $clientenv rep$tnum-upg.db]
+ error_check_good client_upg_db_get [$client_upg_db get hello] \
+ [list [list hello world]]
+ # !!! We use this handle later. Don't close it here.
+
+ # Close master.
+ puts "\tRep$tnum.b: Close master."
+ error_check_good masterenv_close [$masterenv close] 0
+
+ puts "\tRep$tnum.c: Upgrade client."
+ set newmasterenv $clientenv
+ error_check_good upgrade_client [$newmasterenv rep_start -master] 0
+
+ puts "\tRep$tnum.d: Reopen old master as client and catch up."
+ set newclientenv [eval {berkdb_env_noerr -create -recover} $envargs \
+ -txn nosync -errpfx NEWCLIENT $verbargs \
+ {-home $masterdir -rep_client -rep_transport [list 1 replsend]}]
+ set envlist "{$newclientenv 1} {$newmasterenv 2}"
+ process_msgs $envlist
+
+ # Test put to the database handle we opened back when the new master
+ # was a client.
+ puts "\tRep$tnum.e: Test put to handle opened before upgrade."
+ set puttxn [$newmasterenv txn]
+ error_check_good client_upg_db_put \
+ [$client_upg_db put -txn $puttxn hello there] 0
+ error_check_good puttxn_commit [$puttxn commit] 0
+ process_msgs $envlist
+
+ # Close the new master's handle for the upgrade-test database; we
+ # don't need it. Then check to make sure the client did in fact
+ # update the database.
+ puts "\tRep$tnum.f: Test that client did update the database."
+ error_check_good client_upg_db_close [$client_upg_db close] 0
+ set newclient_upg_db \
+ [berkdb_open_noerr -env $newclientenv rep$tnum-upg.db]
+ error_check_good newclient_upg_db_get [$newclient_upg_db get hello] \
+ [list [list hello there]]
+ error_check_good newclient_upg_db_close [$newclient_upg_db close] 0
+
+ error_check_good newmasterenv_close [$newmasterenv close] 0
+ error_check_good newclientenv_close [$newclientenv close] 0
+
+ if { [lsearch $envargs "-encrypta*"] !=-1 } {
+ set encrypt 1
+ }
+ error_check_good verify \
+ [verify_dir $clientdir "\tRep$tnum.g: " 0 0 1] 0
+ replclose $testdir/MSGQUEUEDIR
+}
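
The property rep011 depends on is that a database handle opened while its environment was a replication client stays usable after the environment is promoted with rep_start -master. A minimal sketch of that idea, assuming the replicated database rep011-upg.db already exists on the client and that a master/client pair is set up as above:

    # The database was replicated from the master; open a handle to it
    # while this environment is still a client.
    set db [berkdb_open_noerr -auto_commit -btree \
        -env $clientenv rep011-upg.db]
    # The old master goes away and the client is promoted...
    error_check_good promote [$clientenv rep_start -master] 0
    # ...and the pre-upgrade handle can still be written through.
    set t [$clientenv txn]
    error_check_good put_after_upgrade [$db put -txn $t hello there] 0
    error_check_good commit [$t commit] 0
    error_check_good close [$db close] 0
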
diff --git a/db-4.8.30/test/rep012.tcl b/db-4.8.30/test/rep012.tcl
new file mode 100644
index 0000000..8a2edb3
--- /dev/null
+++ b/db-4.8.30/test/rep012.tcl
@@ -0,0 +1,292 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep012
+# TEST Replication and dead DB handles.
+# TEST
+# TEST Run a modified version of test001 in a replicated master env.
+# TEST Run in replicated environment with secondary indices too.
+# TEST Make additional changes to master, but not to the client.
+# TEST Downgrade the master and upgrade the client with open db handles.
+# TEST Verify that the rollback on clients gives dead db handles.
+proc rep012 { method { niter 10 } { tnum "012" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 3]
+
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r):\
+ Replication and dead db handles $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client 0 logs are [lindex $l 1]"
+ puts "Rep$tnum: Client 1 logs are [lindex $l 2]"
+ rep012_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep012_sub { method niter tnum logset recargs largs } {
+ global testdir
+ global databases_in_memory
+ global repfiles_in_memory
+ global verbose_check_secondaries
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set orig_tdir $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR.2
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set c2_logtype [lindex $logset 2]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ $m_logargs -errpfx ENV0 $verbargs $repmemargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set env0 [eval $ma_envcmd $recargs -rep_master]
+ set masterenv $env0
+
+ # Open two clients
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $c_logargs -errpfx ENV1 $verbargs $repmemargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set env1 [eval $cl_envcmd $recargs -rep_client]
+ set clientenv $env1
+
+ repladd 3
+ set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs \
+ $c2_logargs -errpfx ENV2 $verbargs $repmemargs \
+ -home $clientdir2 -rep_transport \[list 3 replsend\]"
+ set cl2env [eval $cl2_envcmd $recargs -rep_client]
+
+ if { $databases_in_memory } {
+ set testfile { "" test$tnum.db }
+ set pname { "" primary$tnum.db }
+ set sname { "" secondary$tnum.db }
+ } else {
+ set testfile "test$tnum.db"
+ set pname "primary$tnum.db"
+ set sname "secondary$tnum.db"
+ }
+ set omethod [convert_method $method]
+ set env0db [eval {berkdb_open_noerr -env $env0 -auto_commit \
+ -create -mode 0644} $largs $omethod $testfile]
+ error_check_good dbopen [is_valid_db $env0db] TRUE
+ set masterdb $env0db
+
+ set do_secondary 0
+ if { [is_btree $method] || [is_hash $method] } {
+ set do_secondary 1
+ # Open the primary
+ set mpdb [eval {berkdb_open_noerr -env $env0 -auto_commit \
+ -create -mode 0644} $largs $omethod $pname]
+ error_check_good dbopen [is_valid_db $mpdb] TRUE
+
+ # Open the secondary and associate it with the primary.
+ set msdb [eval {berkdb_open_noerr -env $env0 -auto_commit \
+ -create -mode 0644} $largs $omethod $sname]
+ error_check_good dbopen [is_valid_db $msdb] TRUE
+ error_check_good associate [$mpdb associate \
+ [callback_n 0] $msdb] 0
+ }
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$env0 1} {$env1 2} {$cl2env 3}"
+ process_msgs $envlist
+
+ set env1db [eval {berkdb_open_noerr -env $env1 -auto_commit \
+ -mode 0644} $largs $omethod $testfile]
+ set clientdb $env1db
+ error_check_good dbopen [is_valid_db $env1db] TRUE
+ set env2db [eval {berkdb_open_noerr -env $cl2env -auto_commit \
+ -mode 0644} $largs $omethod $testfile]
+ error_check_good dbopen [is_valid_db $env2db] TRUE
+
+ # Run a modified test001 in the master (and update clients).
+ puts "\tRep$tnum.a.0: Running rep_test in replicated env."
+ eval rep_test $method $masterenv $masterdb $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ if { $do_secondary } {
+ # Put some data into the primary
+ puts "\tRep$tnum.a.1: Putting primary/secondary data on master."
+ eval rep012_sec $method $mpdb $niter keys data
+ process_msgs $envlist
+
+ set verbose_check_secondaries 1
+ check_secondaries $mpdb $msdb $niter keys data "Rep$tnum.b"
+ } else {
+ puts "\tRep$tnum.b: Skipping secondaries for method $method"
+ }
+
+ # Check that databases are in-memory or on-disk as expected.
+ # We can only check the secondaries if secondaries are allowed for
+ # this access method.
+ set names [list $testfile]
+ if { $do_secondary } {
+ lappend names $pname $sname
+ }
+ foreach name $names {
+ eval check_db_location $masterenv $name
+ eval check_db_location $clientenv $name
+ eval check_db_location $cl2env $name
+ }
+
+ puts "\tRep$tnum.c: Run test in master and client 2 only"
+ set nstart $niter
+ eval rep_test\
+ $method $masterenv $masterdb $niter $nstart $nstart 0 $largs
+
+ # Ignore messages for $env1.
+ set envlist "{$env0 1} {$cl2env 3}"
+ process_msgs $envlist
+
+ # Nuke those for client about to become master.
+ replclear 2
+ tclsleep 3
+ puts "\tRep$tnum.d: Swap envs"
+ set tmp $masterenv
+ set masterenv $clientenv
+ set clientenv $tmp
+ error_check_good downgrade [$clientenv rep_start -client] 0
+ error_check_good upgrade [$masterenv rep_start -master] 0
+ set envlist "{$env0 1} {$env1 2} {$cl2env 3}"
+ process_msgs $envlist
+
+ #
+ # At this point, env0 should have rolled back across a txn commit.
+ # If we do any operation on env0db, we should get an error that
+ # the handle is dead.
+ puts "\tRep$tnum.e: Try to access db handle after rollback"
+ set stat1 [catch {$env0db stat} ret1]
+ error_check_good stat1 $stat1 1
+ error_check_good dead1 [is_substr $ret1 DB_REP_HANDLE_DEAD] 1
+
+ set stat3 [catch {$env2db stat} ret3]
+ error_check_good stat3 $stat3 1
+ error_check_good dead3 [is_substr $ret3 DB_REP_HANDLE_DEAD] 1
+
+ if { $do_secondary } {
+ #
+ # Check both secondary get and close to detect DEAD_HANDLE.
+ #
+ puts "\tRep$tnum.f: Try to access secondary db handles after rollback"
+ set verbose_check_secondaries 1
+ check_secondaries $mpdb $msdb $niter \
+ keys data "Rep$tnum.f" errp errs errsg
+ error_check_good deadp [is_substr $errp DB_REP_HANDLE_DEAD] 1
+ error_check_good deads [is_substr $errs DB_REP_HANDLE_DEAD] 1
+ error_check_good deadsg [is_substr $errsg DB_REP_HANDLE_DEAD] 1
+ puts "\tRep$tnum.g: Closing"
+ error_check_good mpdb [$mpdb close] 0
+ error_check_good msdb [$msdb close] 0
+ } else {
+ puts "\tRep$tnum.f: Closing"
+ }
+
+ error_check_good env0db [$env0db close] 0
+ error_check_good env1db [$env1db close] 0
+ error_check_good cl2db [$env2db close] 0
+ error_check_good env0_close [$env0 close] 0
+ error_check_good env1_close [$env1 close] 0
+ error_check_good cl2_close [$cl2env close] 0
+ replclose $testdir/MSGQUEUEDIR
+ set verbose_check_secondaries 0
+ set testdir $orig_tdir
+ return
+}
+
+proc rep012_sec {method pdb niter keysp datap} {
+ source ./include.tcl
+
+ upvar $keysp keys
+ upvar $datap data
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $niter } { incr n } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $method $datum]
+
+ set ret [$pdb put $key [chop_data $method $datum]]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+}
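
Both rep012 and rep013 rely on the same recovery idiom for dead handles: once a client rolls back a committed transaction during synchronization, operations on handles opened before the rollback fail with DB_REP_HANDLE_DEAD, and the handle must be closed and reopened. A sketch of that idiom; the proc name is a placeholder and db_cmd is assumed to hold the original berkdb_open_noerr command string:

    proc refresh_if_dead_sketch { db db_cmd } {
        set stat [catch {$db stat} ret]
        if { $stat == 1 } {
            # Only a dead handle is an acceptable failure here.
            error_check_good dead [is_substr $ret DB_REP_HANDLE_DEAD] 1
            error_check_good close [$db close] 0
            set db [eval $db_cmd]
            error_check_good reopen [is_valid_db $db] TRUE
        }
        return $db
    }
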
diff --git a/db-4.8.30/test/rep013.tcl b/db-4.8.30/test/rep013.tcl
new file mode 100644
index 0000000..5a73c47
--- /dev/null
+++ b/db-4.8.30/test/rep013.tcl
@@ -0,0 +1,299 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep013
+# TEST Replication and swapping master/clients with open dbs.
+# TEST
+# TEST Run a modified version of test001 in a replicated master env.
+# TEST Make additional changes to master, but not to the client.
+# TEST Swap master and client.
+# TEST Verify that the rollback on clients gives dead db handles.
+# TEST Rerun the test, turning on client-to-client synchronization.
+# TEST Swap and verify several times.
+proc rep013 { method { niter 10 } { tnum "013" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 3]
+
+ # Set up named in-memory database testing.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases"
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ set anyopts { "" "anywhere" }
+ foreach r $test_recopts {
+ foreach l $logsets {
+ foreach a $anyopts {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($r $a): Replication and \
+ ($method) master/client swapping $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client 0 logs are [lindex $l 1]"
+ puts "Rep$tnum: Client 1 logs are [lindex $l 2]"
+ rep013_sub $method $niter $tnum $l $r $a $args
+ }
+ }
+ }
+}
+
+proc rep013_sub { method niter tnum logset recargs anyopt largs } {
+ global testdir
+ global anywhere
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set orig_tdir $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR.2
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ if { $anyopt == "anywhere" } {
+ set anywhere 1
+ } else {
+ set anywhere 0
+ }
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set c2_logtype [lindex $logset 2]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+
+ # Set number of swaps between master and client.
+ set nswap 6
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ $m_logargs -errpfx ENV1 $verbargs $repmemargs \
+ -cachesize {0 4194304 3} \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set env1 [eval $ma_envcmd $recargs -rep_master]
+
+ # Open two clients
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $c_logargs -errpfx ENV2 $verbargs $repmemargs \
+ -cachesize {0 2097152 2} \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set env2 [eval $cl_envcmd $recargs -rep_client]
+
+ repladd 3
+ set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs \
+ $c2_logargs -errpfx ENV3 $verbargs $repmemargs \
+ -cachesize {0 1048576 1} \
+ -home $clientdir2 -rep_transport \[list 3 replsend\]"
+ set cl2env [eval $cl2_envcmd $recargs -rep_client]
+
+ # Set database name for in-memory or on-disk.
+ if { $databases_in_memory } {
+ set testfile { "" "test.db" }
+ } else {
+ set testfile "test.db"
+ }
+
+ set omethod [convert_method $method]
+
+ set env1db_cmd "berkdb_open_noerr -env $env1 -auto_commit \
+ -create -mode 0644 $largs $omethod $testfile"
+ set env1db [eval $env1db_cmd]
+ error_check_good dbopen [is_valid_db $env1db] TRUE
+
+ #
+ # Verify that a client creating a database gets an error.
+ #
+ set stat [catch {berkdb_open_noerr -env $env2 -auto_commit \
+ -create -mode 0644 $largs $omethod $testfile} ret]
+ error_check_good create_cl $stat 1
+ error_check_good cr_str [is_substr $ret "invalid"] 1
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$env1 1} {$env2 2} {$cl2env 3}"
+ process_msgs $envlist
+
+ set env2db_cmd "berkdb_open_noerr -env $env2 -auto_commit \
+ -mode 0644 $largs $omethod $testfile"
+ set env2db [eval $env2db_cmd]
+ error_check_good dbopen [is_valid_db $env2db] TRUE
+ set env3db_cmd "berkdb_open_noerr -env $cl2env -auto_commit \
+ -mode 0644 $largs $omethod $testfile"
+ set env3db [eval $env3db_cmd]
+ error_check_good dbopen [is_valid_db $env3db] TRUE
+
+ #
+ # Set up all the master/client data we're going to need
+ # to keep track of and swap.
+ #
+ set masterenv $env1
+ set masterdb $env1db
+ set mid 1
+ set clientenv $env2
+ set clientdb $env2db
+ set cid 2
+ set mdb_cmd "berkdb_open_noerr -env $masterenv -auto_commit \
+ -mode 0644 $largs $omethod $testfile"
+ set cdb_cmd "berkdb_open_noerr -env $clientenv -auto_commit \
+ -mode 0644 $largs $omethod $testfile"
+
+ # Run a modified test001 in the master (and update clients).
+ puts "\tRep$tnum.a: Running test001 in replicated env."
+ eval rep_test $method $masterenv $masterdb $niter 0 0 0 $largs
+ set envlist "{$env1 1} {$env2 2} {$cl2env 3}"
+ process_msgs $envlist
+
+ set nstart 0
+ for { set i 0 } { $i < $nswap } { incr i } {
+ puts "\tRep$tnum.b.$i: Check for bad db handles"
+ set dbl {masterdb clientdb env3db}
+ set dbcmd {$mdb_cmd $cdb_cmd $env3db_cmd}
+
+ set stat [catch {$masterdb stat} ret]
+ if { $stat == 1 } {
+ error_check_good dead [is_substr $ret \
+ DB_REP_HANDLE_DEAD] 1
+ error_check_good close [$masterdb close] 0
+ set masterdb [eval $mdb_cmd]
+ error_check_good dbopen [is_valid_db $masterdb] TRUE
+ }
+
+ set stat [catch {$clientdb stat} ret]
+ if { $stat == 1 } {
+ error_check_good dead [is_substr $ret \
+ DB_REP_HANDLE_DEAD] 1
+ error_check_good close [$clientdb close] 0
+ set clientdb [eval $cdb_cmd]
+ error_check_good dbopen [is_valid_db $clientdb] TRUE
+ }
+
+ set stat [catch {$env3db stat} ret]
+ if { $stat == 1 } {
+ error_check_good dead [is_substr $ret \
+ DB_REP_HANDLE_DEAD] 1
+ error_check_good close [$env3db close] 0
+ set env3db [eval $env3db_cmd]
+ error_check_good dbopen [is_valid_db $env3db] TRUE
+ }
+
+ set nstart [expr $nstart + $niter]
+ puts "\tRep$tnum.c.$i: Run test in master and client2 only"
+ eval rep_test \
+ $method $masterenv $masterdb $niter $nstart $nstart 0 $largs
+ set envlist "{$masterenv $mid} {$cl2env 3}"
+ process_msgs $envlist
+
+ # Nuke those for client about to become master.
+ replclear $cid
+
+ # Swap all the info we need.
+ set tmp $masterenv
+ set masterenv $clientenv
+ set clientenv $tmp
+
+ set tmp $masterdb
+ set masterdb $clientdb
+ set clientdb $tmp
+
+ set tmp $mid
+ set mid $cid
+ set cid $tmp
+
+ set tmp $mdb_cmd
+ set mdb_cmd $cdb_cmd
+ set cdb_cmd $tmp
+
+ puts "\tRep$tnum.d.$i: Swap: master $mid, client $cid"
+ error_check_good downgrade [$clientenv rep_start -client] 0
+ error_check_good upgrade [$masterenv rep_start -master] 0
+ set envlist "{$env1 1} {$env2 2} {$cl2env 3}"
+ process_msgs $envlist
+ }
+ puts "\tRep$tnum.e: Check message handling of client."
+ set req3 [stat_field $cl2env rep_stat "Client service requests"]
+ set rereq1 [stat_field $env1 rep_stat "Client rerequests"]
+ set rereq2 [stat_field $env2 rep_stat "Client rerequests"]
+ if { $anyopt == "anywhere" } {
+ error_check_bad req $req3 0
+ error_check_bad rereq1 $rereq1 0
+ error_check_bad rereq2 $rereq2 0
+ } else {
+ error_check_good req $req3 0
+ error_check_good rereq1 $rereq1 0
+ error_check_good rereq2 $rereq2 0
+ }
+
+ # Check that databases are in-memory or on-disk as expected.
+ check_db_location $env1
+ check_db_location $env2
+ check_db_location $cl2env
+
+ puts "\tRep$tnum.f: Closing"
+ error_check_good masterdb [$masterdb close] 0
+ error_check_good clientdb [$clientdb close] 0
+ error_check_good cl2db [$env3db close] 0
+ error_check_good env1_close [$env1 close] 0
+ error_check_good env2_close [$env2 close] 0
+ error_check_good cl2_close [$cl2env close] 0
+ replclose $testdir/MSGQUEUEDIR
+ set testdir $orig_tdir
+ set anywhere 0
+ return
+}
diff --git a/db-4.8.30/test/rep014.tcl b/db-4.8.30/test/rep014.tcl
new file mode 100644
index 0000000..df44873
--- /dev/null
+++ b/db-4.8.30/test/rep014.tcl
@@ -0,0 +1,202 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep014
+# TEST Replication and multiple replication handles.
+# TEST Test multiple client handles, opening and closing them to
+# TEST make sure we end up with the right set of open files.
+#
+proc rep014 { method { niter 10 } { tnum "014" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ # We can't open two envs on HP-UX, so just skip the
+ # whole test since that is at the core of it.
+ if { $is_hp_test == 1 } {
+ puts "Rep$tnum: Skipping for HP-UX."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r): Replication\
+ and openfiles $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep014_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep014_sub { method niter tnum logset recargs largs } {
+ global testdir
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set orig_tdir $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \
+ $verbargs -errpfx MASTER -home $masterdir $repmemargs \
+ -rep_transport \[list 1 replsend\]"
+ set env0 [eval $ma_envcmd $recargs -rep_master]
+ set masterenv $env0
+
+ # Open a client.
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ $verbargs -errpfx CLIENT1 -home $clientdir $repmemargs \
+ -rep_transport \[list 2 replsend\]"
+ set env1 [eval $cl_envcmd $recargs]
+ error_check_good client_env [is_valid_env $env1] TRUE
+ set env2 [eval $cl_envcmd]
+ error_check_good client_env [is_valid_env $env2] TRUE
+
+ error_check_good e1_cl [$env1 rep_start -client] 0
+
+ # Set up databases for in-memory or on-disk.
+ if { $databases_in_memory } {
+ set testfile { "" "test.db" }
+ } else {
+ set testfile "test.db"
+ }
+
+ set omethod [convert_method $method]
+ set env0db [eval {berkdb_open_noerr -env $env0 -auto_commit \
+ -create -mode 0644} $largs $omethod $testfile]
+ set masterdb $env0db
+ error_check_good dbopen [is_valid_db $env0db] TRUE
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$env0 1} {$env1 2}"
+ process_msgs $envlist
+
+ # Run a modified test001 in the master (and update clients).
+ puts "\tRep$tnum.a: Running test001 in replicated env."
+ eval rep_test $method $masterenv $masterdb $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Close and reopen client env."
+ error_check_good env1_close [$env1 close] 0
+ set env1 [eval $cl_envcmd]
+ error_check_good client_env [is_valid_env $env1] TRUE
+ error_check_good e1_cl [$env1 rep_start -client] 0
+
+ puts "\tRep$tnum.c: Run test in master again."
+ set start $niter
+ eval rep_test $method $masterenv $masterdb $niter $start 0 0 $largs
+ set envlist "{$env0 1} {$env1 2}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.d: Start and close 2nd client env."
+ error_check_good e2_pfx [$env2 errpfx CLIENT2] 0
+ error_check_good e2_cl [$env2 rep_start -client] 0
+ error_check_good env2_close [$env2 close] 0
+
+ puts "\tRep$tnum.e: Run test in master again."
+ set start [expr $start + $niter]
+ error_check_good e1_pfx [$env1 errpfx CLIENT1] 0
+ eval rep_test $method $masterenv $masterdb $niter $start 0 0 $largs
+ process_msgs $envlist
+
+ puts "\tRep$tnum.f: Open env2, close env1, use env2."
+ set env2 [eval $cl_envcmd]
+ error_check_good client_env [is_valid_env $env2] TRUE
+ error_check_good e2_pfx [$env2 errpfx CLIENT2] 0
+ error_check_good e2_cl [$env2 rep_start -client] 0
+ error_check_good e1_pfx [$env1 errpfx CLIENT1] 0
+
+ # Check for on-disk or in-memory while we have all 3 envs.
+ check_db_location $masterenv
+ check_db_location $env1
+ check_db_location $env2
+
+ error_check_good env1_close [$env1 close] 0
+
+ puts "\tRep$tnum.g: Run test in master again."
+ set start [expr $start + $niter]
+ error_check_good e2_pfx [$env2 errpfx CLIENT2] 0
+ eval rep_test $method $masterenv $masterdb $niter $start 0 0 $largs
+ set envlist "{$env0 1} {$env2 2}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.h: Closing"
+ error_check_good env0db [$env0db close] 0
+ error_check_good env0_close [$env0 close] 0
+ error_check_good env2_close [$env2 close] 0
+ replclose $testdir/MSGQUEUEDIR
+ set testdir $orig_tdir
+ return
+}
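
rep014 leans on the ability (not available on HP-UX) to open the same client home through more than one Tcl environment handle, with each handle configured independently. A condensed sketch, assuming cl_envcmd is the client's berkdb_env_noerr command string from the test above; the handle and prefix names are placeholders:

    set envA [eval $cl_envcmd]
    set envB [eval $cl_envcmd]
    error_check_good pfxA [$envA errpfx CLIENT_A] 0
    error_check_good pfxB [$envB errpfx CLIENT_B] 0
    error_check_good startA [$envA rep_start -client] 0
    # Either handle can drive replication; closing the other must not
    # disturb the set of files the environment has open.
    error_check_good closeB [$envB close] 0
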
diff --git a/db-4.8.30/test/rep015.tcl b/db-4.8.30/test/rep015.tcl
new file mode 100644
index 0000000..4621beb
--- /dev/null
+++ b/db-4.8.30/test/rep015.tcl
@@ -0,0 +1,321 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep015
+# TEST Locking across multiple pages with replication.
+# TEST
+# TEST Open master and client with small pagesize and
+# TEST generate more than one page and generate off-page
+# TEST dups on the first page (second key) and last page
+# TEST (next-to-last key).
+# TEST Within a single transaction, for each database, open
+# TEST 2 cursors and delete the first and last entries (this
+# TEST exercises locks on regular pages). Intermittently
+# TEST update client during the process.
+# TEST Within a single transaction, for each database, open
+# TEST 2 cursors. Walk to the off-page dups and delete one
+# TEST from each end (this exercises locks on off-page dups).
+# TEST Intermittently update client.
+#
+proc rep015 { method { nentries 100 } { tnum "015" } { ndb 3 } args } {
+ global repfiles_in_memory
+ global rand_init
+ berkdb srand $rand_init
+
+ source ./include.tcl
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for btree only.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Skipping rep$tnum for method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: \
+ Skipping for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r):\
+ Replication and locking $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep015_sub $method $nentries $tnum $ndb $l $r $args
+ }
+ }
+}
+
+proc rep015_sub { method nentries tnum ndb logset recargs largs } {
+ global testdir
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set omethod [convert_method $method]
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \
+ $verbargs -errpfx MASTER $repmemargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ $verbargs -errpfx CLIENT $repmemargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Set up the master databases. The small pagesize quickly
+ # generates multiple pages and off-page dups.
+ set pagesize 512
+ puts "\tRep$tnum.a: Create and populate databases in master."
+ for { set i 0 } { $i < $ndb } { incr i } {
+ set db [eval berkdb_open_noerr -create $omethod -auto_commit \
+ -pagesize $pagesize -env $masterenv $largs -dup testdb$i.db]
+ set dblist($i) $db
+ #
+ # Populate, being sure to create multiple pages.
+ # The non-duplicate entries are pairs of the form
+ # {1, data1} {2, data2}. The duplicates are pairs of
+ # the form {2, dup1} {2, dup2}, {2, dup3}, etc.
+ #
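+		#
+		# For example, with the default nentries of 100 this
+		# leaves keys 1..100 holding data1..data100, plus 100
+		# duplicates on key 2 and 100 duplicates on key 99 (the
+		# next-to-last key), which is what forces the off-page
+		# duplicate trees checked for below.
+		#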
+ for { set j 1 } { $j <= $nentries } { incr j } {
+ set t [$masterenv txn]
+ error_check_good put_$db [eval $db put -txn $t \
+ $j [chop_data $method data$j]] 0
+ error_check_good txn_commit [$t commit] 0
+ }
+ # Create off-page dups on key 2 and next-to-last key.
+ set t [$masterenv txn]
+ for { set j 1 } { $j <= $nentries } { incr j } {
+ error_check_good put_second [eval $db put -txn $t \
+ 2 [chop_data $method dup$j]] 0
+ error_check_good put_next_to_last [eval $db put \
+ -txn $t \
+ [expr $nentries - 1] [chop_data $method dup$j]] 0
+ }
+ error_check_good txn_commit [$t commit] 0
+ # Make sure there are off-page dups.
+ set stat [$db stat]
+ error_check_bad stat:offpage \
+ [is_substr $stat "{{Internal pages} 0}"] 1
+ }
+
+ puts "\tRep$tnum.b: Propagate setup to clients."
+ process_msgs $envlist
+
+ # Open client databases so we can exercise locking there too.
+ for { set i 0 } { $i < $ndb } { incr i } {
+ set cdb [eval {berkdb_open_noerr} -auto_commit \
+ -env $clientenv $largs testdb$i.db]
+ set cdblist($i) $cdb
+ }
+
+ # Set up two cursors into each db. Randomly select a cursor
+ # and do the next thing: position, delete, or close.
+ foreach option { regular off-page } {
+ puts "\tRep$tnum.c: Transactional cursor deletes ($option)."
+
+ set t [$masterenv txn]
+ # Set up two cursors into each db, and initialize the next
+ # action to be done to POSITION.
+ for { set i 0 } { $i < [expr $ndb * 2] } { incr i } {
+ set db $dblist([expr $i / 2])
+ set mcurs($i) [eval {$db cursor} -txn $t]
+ error_check_good mcurs$i \
+ [is_valid_cursor $mcurs($i) $db] TRUE
+ set cnext($i) POSITION
+ }
+
+ set ct [$clientenv txn]
+ # Set up two cursors into each client db.
+ for { set i 0 } { $i < [expr $ndb * 2] } { incr i } {
+ set cdb $cdblist([expr $i / 2])
+ set ccurs($i) [eval {$cdb cursor} -txn $ct]
+ error_check_good ccurs$i \
+ [is_valid_cursor $ccurs($i) $cdb] TRUE
+ }
+
+ # Randomly pick a cursor to operate on and do the next thing.
+ # At POSITION, we position that cursor. At DELETE, we delete
+ # the current item. At CLOSE, we close the cursor. At DONE,
+ # we do nothing except check to see if all cursors have reached
+ # DONE, and quit when they have.
+ # On the off-page dup test, walk to reach an off-page entry,
+ # and delete that one.
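+		# Each cursor therefore steps through
+		# POSITION -> DELETE -> CLOSE -> DONE, advancing one
+		# state each time it is randomly selected below.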
+ set k 0
+ while { 1 } {
+ # Every nth time through, update the client.
+# set n 5
+# if {[expr $k % $n] == 0 } {
+# puts "Updating clients"
+# process_msgs $envlist
+# }
+# incr k
+ set i [berkdb random_int 0 [expr [expr $ndb * 2] - 1]]
+ set next $cnext($i)
+ switch -exact -- $next {
+ POSITION {
+ do_position $mcurs($i) \
+ $i $nentries $option
+ set cnext($i) DELETE
+ # Position the client cursors too.
+ do_position $ccurs($i) \
+ $i $nentries $option
+ }
+ DELETE {
+ error_check_good c_del \
+ [$mcurs($i) del] 0
+ set cnext($i) CLOSE
+ # Update clients after a delete.
+ process_msgs $envlist
+ }
+ CLOSE {
+ error_check_good c_close.$i \
+ [$mcurs($i) close] 0
+ set cnext($i) DONE
+ # Close the client cursor too.
+ error_check_good cc_close.$i \
+ [$ccurs($i) close] 0
+ }
+ DONE {
+ set breakflag 1
+ for { set j 0 } \
+ { $j < [expr $ndb * 2] } \
+ { incr j } {
+ if { $cnext($j) != "DONE" } {
+ set breakflag 0
+ }
+ }
+ if { $breakflag == 1 } {
+ break
+ }
+ }
+ default {
+ puts "FAIL: Unrecognized \
+ next action $next"
+ }
+ }
+ }
+ error_check_good txn_commit [$t commit] 0
+ error_check_good clienttxn_commit [$ct commit] 0
+ process_msgs $envlist
+ }
+
+ # Clean up.
+ for { set i 0 } { $i < $ndb } { incr i } {
+ set db $dblist($i)
+ error_check_good close_$db [$db close] 0
+ set cdb $cdblist($i)
+ error_check_good close_$cdb [$cdb close] 0
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+ return
+}
+
+proc do_position { cursor i nentries option } {
+ if { [expr $i % 2] == 0 } {
+ if { $option == "regular" } {
+ set ret [$cursor get -first]
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good get_first \
+ [string range $data 4 end] $key
+ } elseif { $option == "off-page" } {
+ set ret [$cursor get -set 2]
+ error_check_good get_key_2 \
+ [lindex [lindex $ret 0] 0] 2
+ error_check_good get_data_2 \
+ [lindex [lindex $ret 0] 1] data2
+ for { set j 1 } { $j <= 95 } { incr j } {
+ set ret [$cursor get -nextdup]
+ error_check_good key_nextdup$j \
+ [lindex [lindex $ret 0] 0] 2
+ error_check_good data_nextdup$j \
+ [lindex [lindex $ret 0] 1] dup$j
+ }
+ }
+ } else {
+ if { $option == "regular" } {
+ set ret [$cursor get -set $nentries]
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good get_set_$nentries \
+ [string range $data 4 end] $key
+ } elseif { $option == "off-page" } {
+ set ret [$cursor get -last]
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good get_last \
+ [string range $data 3 end] [expr $key + 1]
+ for { set j 1 } { $j <= 5 } { incr j } {
+ set ret [$cursor get -prev]
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good get_prev \
+ [string range $data 3 end] \
+ [expr [expr $key + 1] - $j]
+ }
+ }
+ }
+}
diff --git a/db-4.8.30/test/rep016.tcl b/db-4.8.30/test/rep016.tcl
new file mode 100644
index 0000000..706c38b
--- /dev/null
+++ b/db-4.8.30/test/rep016.tcl
@@ -0,0 +1,293 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep016
+# TEST Replication election test with varying required nvotes.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST hold an election among a group of clients to make sure they select
+# TEST the master with varying required participants.
+
+proc rep016 { method args } {
+ global errorInfo
+ global databases_in_memory
+ global repfiles_in_memory
+
+ source ./include.tcl
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+ set tnum "016"
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: Skipping for method $method."
+ return
+ }
+
+ set nclients 5
+ set logsets [create_logsets [expr $nclients + 1]]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases"
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r): Replication\
+ elections with varying nvotes $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "Rep$tnum: Client $i logs are\
+ [lindex $l [expr $i + 1]]"
+ }
+ rep016_sub $method $nclients $tnum $l $r $args
+ }
+ }
+}
+
+proc rep016_sub { method nclients tnum logset recargs largs } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set niter 5
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ set m_logtype [lindex $logset 0]
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ set c_logtype($i) [lindex $logset [expr $i + 1]]
+ set c_logargs($i) [adjust_logargs $c_logtype($i)]
+ set c_txnargs($i) [adjust_txnargs $c_logtype($i)]
+ }
+
+ # Open a master.
+ set envlist {}
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \
+ -event rep_event $repmemargs \
+ -home $masterdir $m_txnargs $m_logargs -rep_master $verbargs \
+ -errpfx MASTER -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M) $recargs]
+ lappend envlist "$masterenv 1"
+
+ # Open the clients.
+ # Don't set -errfile now -- wait until the error catching
+ # portion of the test is complete.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env_noerr -create -home $clientdir($i) \
+ -event rep_event $repmemargs \
+ $c_txnargs($i) $c_logargs($i) -rep_client $verbargs \
+ -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i) $recargs]
+ lappend envlist "$clientenv($i) $envid"
+ }
+ # Bring the clients online by processing the startup messages.
+ process_msgs $envlist
+
+ # Run a modified test001 in the master.
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ # Check that databases are in-memory or on-disk as expected.
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+ check_db_location $masterenv
+ for { set i 0 } { $i < $nclients } { incr i } {
+ check_db_location $clientenv($i)
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+ set envlist [lreplace $envlist 0 0]
+
+ puts "\tRep$tnum.b: Error values for rep_elect"
+ #
+ # Do all the error catching in client0. We do not need to call
+ # start_election here to fork a process because we never get
+ # far enough to send/receive any messages. We just want to
+ # check the error message.
+ #
+ # !!!
+ # We cannot set -errpfx or -errfile or anything in the
+ # env_cmd above. Otherwise the correct output won't be set
+ # in 'ret' below and the test will fail.
+ #
+	# First check bad combinations of nsites and nvotes.
+ #
+ set nsites [expr $nclients + 1]
+ set priority 2
+ set timeout 5000000
+ #
+ # Setting nsites to 0 acts as a signal for rep_elect to use
+ # the configured nsites, but since we haven't set that yet,
+ # this should still fail. TODO: need another test verifying
+ # the proper operation when we *have* configured nsites.
+ #
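+	#
+	# Both failure cases below expect the same "is larger than
+	# nsites" error: first an nvotes of 2 against an nsites of 0
+	# (nothing configured), then an nvotes of nclients + 1 against
+	# an nsites of nclients.
+	#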
+ set nsites 0
+ set nvotes 2
+ set res [catch {$clientenv(0) rep_elect $nsites $nvotes $priority \
+ $timeout} ret]
+ error_check_bad catch $res 0
+ error_check_good ret [is_substr $ret "is larger than nsites"] 1
+
+ #
+ # Check nvotes > nsites.
+ #
+ set nsites $nclients
+ set nvotes [expr $nsites + 1]
+ set res [catch {$clientenv(0) rep_elect $nsites $nvotes $priority \
+ $timeout} ret]
+ error_check_bad catch $res 0
+ error_check_good ret [is_substr $ret "is larger than nsites"] 1
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ replclear [expr $i + 2]
+ #
+ # This test doesn't use the testing hooks, so
+ # initialize err_cmd and crash appropriately.
+ #
+ set err_cmd($i) "none"
+ set crash($i) 0
+ #
+ # Initialize the array pri. We'll set it to
+ # appropriate values when the winner is determined.
+ #
+ set pri($i) 0
+ #
+ if { $rep_verbose == 1 } {
+ $clientenv($i) errpfx CLIENT.$i
+ $clientenv($i) verbose $verbose_type on
+ $clientenv($i) errfile /dev/stderr
+ set env_cmd($i) [concat $env_cmd($i) \
+ "-errpfx CLIENT.$i -errfile /dev/stderr "]
+ }
+ }
+ set m "Rep$tnum.c"
+ puts "\t$m: Check single master/client can elect itself"
+ #
+ # 2 sites: 1 master, 1 client. Allow lone client to elect itself.
+ # Adjust client env list to reflect the single client.
+ #
+ set oldenvlist $envlist
+ set envlist [lreplace $envlist 1 end]
+ set nsites 2
+ set nvotes 1
+ set orig_ncl $nclients
+ set nclients 1
+ set elector 0
+ set winner 0
+ setpriority pri $nclients $winner
+ run_election env_cmd envlist err_cmd pri crash\
+ $qdir $m $elector $nsites $nvotes $nclients $winner 1 $dbname
+
+ #
+ # Now run with all clients. Client0 should always get elected
+ # because it became master and should have a bigger LSN.
+ #
+ set nclients $orig_ncl
+ set envlist [lreplace $oldenvlist 0 0 [lindex $envlist 0]]
+
+ set m "Rep$tnum.d"
+ puts "\t$m: Elect with 100% client participation"
+ set nsites $nclients
+ set nvotes $nclients
+ set winner [rep016_selectwinner $nsites $nvotes $nclients]
+ setpriority pri $nclients $winner
+ run_election env_cmd envlist err_cmd pri crash\
+ $qdir $m $elector $nsites $nvotes $nclients $winner 1 $dbname
+
+ #
+ # Elect with varying levels of participation. Start with nsites
+ # as nclients+1 (simulating a down master) and require nclients,
+ # and fewer (by 1) until we get down to 2 clients.
+ #
+ set m "Rep$tnum.e"
+ puts "\t$m: Elect with varying participation"
+ set nsites [expr $nclients + 1]
+ set count 0
+ for {set n $nclients} {$n > 1} {incr n -1} {
+ set m "Rep$tnum.e.$count"
+ set winner [rep016_selectwinner $nsites $n $n]
+ setpriority pri $nclients $winner
+ run_election env_cmd envlist err_cmd pri crash\
+ $qdir $m $elector $nsites $n $n $winner 1 $dbname
+ incr count
+ }
+
+ foreach pair $envlist {
+ set cenv [lindex $pair 0]
+ error_check_good cenv_close [$cenv close] 0
+ }
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep016_selectwinner { nsites nvotes nclients } {
+ #
+ # Special case: When we test with 100% participation, we expect
+ # client 0 to always win because it has a bigger LSN than the
+	# rest due to the earlier part of the test.  This special case is
+ # kinda gross.
+ #
+ if { $nsites != $nvotes } {
+ set win [berkdb random_int 0 [expr $nclients - 1]]
+ } else {
+ set win 0
+ }
+ return $win
+}
diff --git a/db-4.8.30/test/rep017.tcl b/db-4.8.30/test/rep017.tcl
new file mode 100644
index 0000000..8e46ea6
--- /dev/null
+++ b/db-4.8.30/test/rep017.tcl
@@ -0,0 +1,268 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep017
+# TEST Concurrency with checkpoints.
+# TEST
+# TEST Verify that we achieve concurrency in the presence of checkpoints.
+# TEST Here are the checks that we wish to make:
+# TEST While dbenv1 is handling the checkpoint record:
+# TEST Subsequent in-order log records are accepted.
+# TEST Accepted PERM log records get NOTPERM
+# TEST A subsequent checkpoint gets NOTPERM
+# TEST After checkpoint completes, next txn returns PERM
+proc rep017 { method { niter 10 } { tnum "017" } args } {
+
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+
+ puts "Rep$tnum ($method $r):\
+ Concurrency with checkpoints $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep017_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep017_sub { method niter tnum logset recargs largs } {
+ source ./include.tcl
+ global perm_response_list
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set omethod [convert_method $method]
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_cmd "berkdb_env_noerr -create $verbargs \
+ -log_max 1000000 $m_txnargs $m_logargs $repmemargs \
+ -home $masterdir -rep_master -errpfx MASTER \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_cmd $recargs]
+
+ # Open a client
+ repladd 2
+ set cl_cmd "berkdb_env_noerr -create -home $clientdir $verbargs \
+ $c_txnargs $c_logargs -rep_client -errpfx CLIENT $repmemargs \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_cmd $recargs]
+
+ # Bring the client online.
+ process_msgs "{$masterenv 1} {$clientenv 2}"
+
+ # Open database in master, make lots of changes so checkpoint
+ # will take a while, and propagate to client.
+ puts "\tRep$tnum.a: Create and populate database."
+ set dbname rep017.db
+ set db [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $masterenv $largs $dbname"]
+ for { set i 1 } { $i <= $niter } { incr i } {
+ set t [$masterenv txn]
+ error_check_good db_put \
+ [eval $db put -txn $t $i [chop_data $method data$i]] 0
+ error_check_good txn_commit [$t commit] 0
+ }
+ process_msgs "{$masterenv 1} {$clientenv 2}" 1
+
+ # Get the master's last LSN before the checkpoint
+ set pre_ckp_offset \
+ [stat_field $masterenv log_stat "Current log file offset"]
+
+ puts "\tRep$tnum.b: Checkpoint on master."
+ error_check_good checkpoint [$masterenv txn_checkpoint] 0
+
+ # Now get ckp LSN
+ set ckp_lsn [stat_field $masterenv txn_stat "LSN of last checkpoint"]
+ set ckp_offset [lindex $ckp_lsn 1]
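+	#
+	# Note: this LSN is a {file offset} pair and only the offset
+	# piece is compared below, which assumes the whole run stays
+	# within a single log file (log_max is set to 1000000 above).
+	#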
+
+ # Fork child process on client. It should process whatever
+ # it finds in the message queue -- just the checkpoint record,
+ # for now. It's run in the background so the parent can
+ # test for whether we're checkpointing at the same time.
+ #
+ puts "\tRep$tnum.c: Fork child process on client."
+ set pid [exec $tclsh_path $test_path/wrap.tcl \
+ rep017script.tcl $testdir/repscript.log \
+ $masterdir $clientdir $rep_verbose $verbose_type &]
+
+
+ # We need to wait until we know that the client is processing a
+ # checkpoint. The checkpoint will consist of some DBREG records
+ # followed by the actual checkpoint. So, if we've gotten records
+ # later than the last LSN when the master took the checkpoint, we've
+	# begun the checkpoint.  By test design, we should not finish the
+ # checkpoint until this process has at least had a chance to run.
+ #
+ # In order to do this, we have handles open on the message
+ # queue from both this process and its child. This is not
+ # normally legal behavior for an application using Berkeley DB,
+ # but this test depends on the parent process doing things while
+ # the child is pausing in the middle of the checkpoint. We are
+ # very careful to control which process is handling which
+ # messages.
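+	# While the loop below polls the client's log offset, the child
+	# (rep017script.tcl) has put the client env into a testing mode
+	# that pauses partway through applying the checkpoint, giving
+	# this parent a window in which to observe the in-progress state.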
+
+ puts "\tRep$tnum.d: Test whether client is in checkpoint."
+ while { 1 } {
+ set client_off \
+ [stat_field $clientenv log_stat "Current log file offset"]
+
+ if { $client_off > $pre_ckp_offset } {
+ if { $client_off > $ckp_offset } {
+ # We already completed the checkpoint and
+ # never got out of here. That's a bug in
+				# never got out of here.  That's a bug
+				# in the test.
+ not_in_checkpoint should_be_in_checkpoint
+ } else {
+ break;
+ }
+ } else {
+ # Not yet up to checkpoint
+ tclsleep 1
+ }
+ }
+
+ # Main client processes checkpoint 2nd time and should get NOTPERM.
+ puts "\tRep$tnum.e: Commit and checkpoint return NOTPERM from client"
+ incr niter
+ set t [$masterenv txn]
+ error_check_good db_put [eval $db put \
+ -txn $t $niter [chop_data $method data$niter]] 0
+ error_check_good txn_commit [$t commit] 0
+ error_check_good checkpoint [$masterenv txn_checkpoint] 0
+ set ckp2_lsn [stat_field $masterenv txn_stat "LSN of last checkpoint"]
+
+ process_msgs "{$clientenv 2}" 1
+
+ # Check that the checkpoint record got a NOTPERM
+ # Find the ckp LSN of the Master and then look for the response
+ # from that message in the client
+ set ckp_result ""
+ foreach i $perm_response_list {
+ # Everything in the list should be NOTPERM
+ if { [llength $i] == 0 } {
+ # Check for sentinel at beginning of list
+ continue;
+ }
+ set ckp_result [lindex $i 0]
+ error_check_good NOTPERM [is_substr $ckp_result NOTPERM] 1
+ if { [lindex $i 1] == $ckp2_lsn } {
+ break
+ }
+ }
+ error_check_bad perm_response $ckp_result ""
+
+ puts "\tRep$tnum.f: Waiting for child ..."
+ # Watch until the checkpoint is done.
+ watch_procs $pid 5
+
+ # Verify that the checkpoint is now complete on the client and
+ # that all later messages have been applied.
+ process_msgs "{$clientenv 2}" 1
+ set client_ckp [stat_field $clientenv txn_stat "LSN of last checkpoint"]
+ error_check_good matching_ckps $client_ckp $ckp2_lsn
+
+ set m_end [stat_field $masterenv log_stat "Current log file offset"]
+ set c_end [stat_field $clientenv log_stat "Current log file offset"]
+ error_check_good matching_lsn $c_end $m_end
+
+ # Finally, now that checkpoints are complete; perform another
+ # perm operation and make sure that it returns ISPERM.
+ puts "\tRep$tnum.g: No pending ckp; check for ISPERM"
+ incr niter
+ set t [$masterenv txn]
+ error_check_good db_put [eval $db put \
+ -txn $t $niter [chop_data $method data$niter]] 0
+ error_check_good txn_commit [$t commit] 0
+ error_check_good checkpoint [$masterenv txn_checkpoint] 0
+ set ckp3_lsn [stat_field $masterenv txn_stat "LSN of last checkpoint"]
+
+ process_msgs "{$clientenv 2}" 1
+
+	# Check that the checkpoint and commit records got an ISPERM
+ # Find the ckp LSN of the Master and then look for the response
+ # from that message in the client
+ set ckp_result ""
+ foreach i $perm_response_list {
+ if { [llength $i] == 0 } {
+ # Check for sentinel at beginning of list
+ continue;
+ }
+
+ # Everything in the list should be ISPERM
+ set ckp_result [lindex $i 0]
+ error_check_good ISPERM [is_substr $ckp_result ISPERM] 1
+ if { [lindex $i 1] == $ckp3_lsn } {
+ break
+ }
+ }
+ error_check_bad perm_response $ckp_result ""
+
+ # Clean up.
+ error_check_good db_close [$db close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep017script.tcl b/db-4.8.30/test/rep017script.tcl
new file mode 100644
index 0000000..25fb530
--- /dev/null
+++ b/db-4.8.30/test/rep017script.tcl
@@ -0,0 +1,83 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Rep017 script - concurrency with checkpoints.
+#
+# Repscript exists to process checkpoints, though as currently
+# written it will process whatever it finds in the message
+# queue.  It requires a one-master one-client setup.
+#
+# Usage: repscript masterdir clientdir rep_verbose verbose_type
+# masterdir: master env directory
+# clientdir: client env directory
+#
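+#
+# It is normally spawned from rep017.tcl via wrap.tcl; an
+# illustrative invocation (log path and verbosity values are
+# examples only) looks like:
+#
+#     exec $tclsh_path $test_path/wrap.tcl rep017script.tcl \
+#         $testdir/repscript.log $masterdir $clientdir 0 rep
+#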
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+source $test_path/reputils.tcl
+
+set usage "repscript masterdir clientdir rep_verbose verbose_type"
+
+# Verify usage
+if { $argc != 4 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set masterdir [ lindex $argv 0 ]
+set clientdir [ lindex $argv 1 ]
+set rep_verbose [ lindex $argv 2 ]
+set verbose_type [ lindex $argv 3 ]
+set verbargs ""
+if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+}
+
+# Join the queue env. We assume the rep test convention of
+# placing the messages in $testdir/MSGQUEUEDIR.
+set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR]
+error_check_good script_qenv_open [is_valid_env $queueenv] TRUE
+
+# We need to set up our own machids.
+# Add 1 for master env id, and 2 for the clientenv id.
+#
+repladd 1
+repladd 2
+
+# Join the master env.
+set ma_cmd "berkdb_env_noerr -home $masterdir $verbargs \
+ -txn -rep_master -rep_transport \[list 1 replsend\]"
+set masterenv [eval $ma_cmd]
+error_check_good script_menv_open [is_valid_env $masterenv] TRUE
+
+puts "Master open"
+
+# Join the client env.
+set cl_cmd "berkdb_env_noerr -home $clientdir $verbargs \
+ -txn -rep_client -rep_transport \[list 2 replsend\]"
+set clientenv [eval $cl_cmd]
+error_check_good script_cenv_open [is_valid_env $clientenv] TRUE
+
+puts "Everyone open"
+tclsleep 10
+
+# Make it so that the client sleeps in the middle of checkpoints
+$clientenv test check 10
+
+puts "Client set"
+
+# Update the client, in order to process the checkpoint
+process_msgs "{$masterenv 1} {$clientenv 2}"
+
+
+puts "Processed messages"
+
+# Close the envs
+error_check_good script_master_close [$masterenv close] 0
+error_check_good script_client_close [$clientenv close] 0
+puts "\tRepscript completed successfully"
diff --git a/db-4.8.30/test/rep018.tcl b/db-4.8.30/test/rep018.tcl
new file mode 100644
index 0000000..2d1fd64
--- /dev/null
+++ b/db-4.8.30/test/rep018.tcl
@@ -0,0 +1,193 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep018
+# TEST Replication with dbremove.
+# TEST
+# TEST Verify that the attempt to remove a database file
+# TEST on the master hangs while another process holds a
+# TEST handle on the client.
+# TEST
+proc rep018 { method { niter 10 } { tnum "018" } args } {
+
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r): Replication with dbremove $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep018_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep018_sub { method niter tnum logset recargs largs } {
+ source ./include.tcl
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set omethod [convert_method $method]
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ puts "\tRep$tnum.a: Create master and client, bring online."
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create \
+ -log_max 1000000 -home $masterdir $verbargs $repmemargs \
+ $m_txnargs $m_logargs -rep_master -errpfx MASTER \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M) $recargs]
+
+ # Open a client
+ repladd 2
+ set env_cmd(C) "berkdb_env_noerr -create -home $clientdir $repmemargs \
+ $c_txnargs $c_logargs -rep_client $verbargs -errpfx CLIENT \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $env_cmd(C) $recargs]
+
+ # Bring the client online.
+ process_msgs "{$masterenv 1} {$clientenv 2}"
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # db_remove in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ puts "\tRep$tnum.b: Open database on master, propagate to client."
+ set dbname rep$tnum.db
+ set db [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $masterenv $largs $dbname"]
+ set t [$masterenv txn]
+ for { set i 1 } { $i <= $niter } { incr i } {
+ error_check_good db_put \
+ [eval $db put -txn $t $i [chop_data $method data$i]] 0
+ }
+ error_check_good txn_commit [$t commit] 0
+ process_msgs "{$masterenv 1} {$clientenv 2}"
+
+ puts "\tRep$tnum.c: Spawn a child tclsh to do client work."
+ set pid [exec $tclsh_path $test_path/wrap.tcl \
+ rep018script.tcl $testdir/rep018script.log $clientdir \
+ $niter $dbname $method $rep_verbose $verbose_type &]
+
+ puts "\tRep$tnum.d: Close and remove database on master."
+ error_check_good close_master_db [$db close] 0
+
+ # Remove database in master env. First make sure the child
+ # tclsh is done reading the data.
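+	#
+	# The coordination with the child goes through marker.db: the
+	# child writes CHILDREADY once it has an open handle on the
+	# client copy of the database, the parent writes PARENTREMOVE
+	# and PARENTDONE around the process_msgs call that is expected
+	# to hang, and the child writes CHILDDONE before closing its
+	# handle so the relative timestamps can be checked afterwards.
+	#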
+ while { 1 } {
+ if { [file exists $testdir/marker.db] == 0 } {
+ tclsleep 1
+ } else {
+ set markerenv [berkdb_env -home $testdir -txn]
+ error_check_good markerenv_open \
+ [is_valid_env $markerenv] TRUE
+ set marker [berkdb_open -unknown -env $markerenv \
+ -auto_commit marker.db]
+ while { [llength [$marker get CHILDREADY]] == 0 } {
+ tclsleep 1
+ }
+ break
+ }
+ }
+ error_check_good db_remove [$masterenv dbremove -auto_commit $dbname] 0
+
+ puts "\tRep$tnum.e: Create new database on master with the same name."
+ set db [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $masterenv $largs $dbname"]
+ error_check_good new_db_open [is_valid_db $db] TRUE
+
+ puts "\tRep$tnum.f: Propagate changes to client. Process should hang."
+ error_check_good timestamp_remove \
+ [$marker put PARENTREMOVE [timestamp -r]] 0
+ process_msgs "{$masterenv 1} {$clientenv 2}"
+ error_check_good timestamp_done \
+ [$marker put PARENTDONE [timestamp -r]] 0
+
+ watch_procs $pid 5
+
+ puts "\tRep$tnum.g: Check for failure."
+ # Check marker file for correct timestamp ordering.
+ set ret [$marker get CHILDDONE]
+ set childdone [lindex [lindex [lindex $ret 0] 1] 0]
+ set ret [$marker get PARENTDONE]
+ set parentdone [lindex [lindex [lindex $ret 0] 1] 0]
+ if { [expr $childdone - $parentdone] > 0 } {
+ puts "\tFAIL: parent must complete after child"
+ }
+
+ # Clean up.
+ error_check_good marker_db_close [$marker close] 0
+	error_check_good markerenv_close [$markerenv close] 0
+ error_check_good masterdb_close [$db close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+
+ # Check log file for failures.
+ set errstrings [eval findfail $testdir/rep018script.log]
+ foreach str $errstrings {
+ puts "FAIL: error message in rep018 log file: $str"
+ }
+}
+
diff --git a/db-4.8.30/test/rep018script.tcl b/db-4.8.30/test/rep018script.tcl
new file mode 100644
index 0000000..50007d3
--- /dev/null
+++ b/db-4.8.30/test/rep018script.tcl
@@ -0,0 +1,98 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Rep018 script - replication with dbremove.
+#
+# Test dbremove with replication.
+#
+# Usage: rep018script clientdir niter dbfile method rep_verbose verbose_type
+# clientdir: client env directory
+# niter: number of items in file
+# dbfile: name of database file
+# method: access method
+# rep_verbose: Is the test doing verbose reporting?
+# verbose_type: What subset of verbose messages?
+#
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+source $test_path/reputils.tcl
+
+set usage "repscript clientdir niter dbfile method rep_verbose verbose_type"
+
+# Verify usage
+if { $argc != 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set clientdir [ lindex $argv 0 ]
+set niter [ lindex $argv 1 ]
+set dbfile [ lindex $argv 2 ]
+set method [ lindex $argv 3 ]
+set rep_verbose [ lindex $argv 4 ]
+set verbose_type [ lindex $argv 5 ]
+set verbargs ""
+if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+}
+
+# Join the queue env. We assume the rep test convention of
+# placing the messages in $testdir/MSGQUEUEDIR.
+set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR]
+error_check_good script_qenv_open [is_valid_env $queueenv] TRUE
+
+#
+# We need to set up our own machids.
+# Add 1 for master env id, and 2 for the clientenv id.
+#
+repladd 1
+repladd 2
+
+# Join the client env.
+set cl_cmd "berkdb_env_noerr -home $clientdir $verbargs -errpfx CHILD \
+ -txn -rep_client -rep_transport \[list 2 replsend\]"
+set clientenv [eval $cl_cmd]
+error_check_good script_cenv_open [is_valid_env $clientenv] TRUE
+
+# Make sure we can read data on client.
+set db [eval "berkdb_open -env $clientenv $dbfile"]
+for { set i 1 } { $i <= $niter } { incr i } {
+ set ret [lindex [$db get $i] 0]
+ error_check_good db_get $ret [list $i [pad_data $method data$i]]
+}
+
+# Put a timestamp in a shared file.
+set markerenv [berkdb_env -create -home $testdir -txn]
+error_check_good markerenv_open [is_valid_env $markerenv] TRUE
+set marker \
+ [eval "berkdb_open -create -btree -auto_commit -env $markerenv marker.db"]
+error_check_good timestamp_ready \
+ [$marker put CHILDREADY [timestamp -r]] 0
+
+# Give the parent a chance to process messages and hang.
+tclsleep 30
+
+# Clean up the child so the parent can go forward.
+error_check_good timestamp_done \
+ [$marker put CHILDDONE [timestamp -r]] 0
+error_check_good client_db_close [$db close] 0
+
+# Check that the master is done.
+while { [llength [$marker get PARENTDONE]] == 0 } {
+ tclsleep 1
+}
+
+# Verify that the newly recreated database is now empty.
+set db [eval "berkdb_open -env $clientenv $dbfile"]
+set cursor [$db cursor]
+error_check_good db_empty [llength [$cursor get -first]] 0
+error_check_good cursor_close [$cursor close] 0
+error_check_good db_close [$db close] 0
+error_check_good marker_db_close [$marker close] 0
+error_check_good markerenv_close [$markerenv close] 0
+error_check_good script_client_close [$clientenv close] 0
+
diff --git a/db-4.8.30/test/rep019.tcl b/db-4.8.30/test/rep019.tcl
new file mode 100644
index 0000000..beac631
--- /dev/null
+++ b/db-4.8.30/test/rep019.tcl
@@ -0,0 +1,184 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep019
+# TEST Replication and multiple clients at same LSN.
+# TEST Have several clients at the same LSN. Run recovery at
+# TEST different times. Declare a client master and after sync-up
+# TEST verify all client logs are identical.
+#
+proc rep019 { method { nclients 3 } { tnum "019" } args } {
+
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+	# This test needs to use recovery, so it is not appropriate
+	# for mixed-mode logging or in-memory database testing.
+ global databases_in_memory
+ if { $databases_in_memory > 0 } {
+ puts "Rep$tnum: Skipping for in-memory databases."
+ return
+ }
+ global mixed_mode_logging
+ if { $mixed_mode_logging > 0 } {
+ puts "Rep$tnum: Skipping for mixed-mode logging."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set args [convert_args $method $args]
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ puts "Rep$tnum ($method $r): Replication\
+ and $nclients recovered clients in sync $msg2."
+ rep019_sub $method $nclients $tnum $r $args
+ }
+}
+
+proc rep019_sub { method nclients tnum recargs largs } {
+ global testdir
+ global util_path
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set orig_tdir $testdir
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set niter 100
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create -txn nosync $verbargs \
+ -home $masterdir -rep_master -errpfx MASTER $repmemargs \
+ -rep_transport \[list 1 replsend\]"
+ set menv [eval $ma_envcmd $recargs]
+
+ for {set i 0} {$i < $nclients} {incr i} {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ set id($i) [expr 2 + $i]
+ repladd $id($i)
+ set cl_envcmd($i) "berkdb_env_noerr -create -txn nosync \
+ -home $clientdir($i) $verbargs -errpfx CLIENT.$i \
+ $repmemargs \
+ -rep_client -rep_transport \[list $id($i) replsend\]"
+ set clenv($i) [eval $cl_envcmd($i) $recargs]
+ error_check_good client_env [is_valid_env $clenv($i)] TRUE
+ }
+ set testfile "test$tnum.db"
+ set omethod [convert_method $method]
+ set masterdb [eval {berkdb_open_noerr -env $menv -auto_commit \
+ -create -mode 0644} $largs $omethod $testfile]
+ error_check_good dbopen [is_valid_db $masterdb] TRUE
+
+ # Bring the clients online by processing the startup messages.
+ set envlist {}
+ lappend envlist "$menv 1"
+ for { set i 0 } { $i < $nclients } { incr i } {
+ lappend envlist "$clenv($i) $id($i)"
+ }
+ process_msgs $envlist
+
+ # Run a modified test001 in the master (and update clients).
+ puts "\tRep$tnum.a: Running test001 in replicated env."
+ eval rep_test $method $menv $masterdb $niter 0 0 0 0 $largs
+ process_msgs $envlist
+
+ error_check_good mdb_cl [$masterdb close] 0
+ # Process any close messages.
+ process_msgs $envlist
+
+ error_check_good menv_cl [$menv close] 0
+ puts "\tRep$tnum.b: Close all envs and run recovery in clients."
+ for {set i 0} {$i < $nclients} {incr i} {
+ error_check_good cl$i.close [$clenv($i) close] 0
+ set hargs($i) "-h $clientdir($i)"
+ }
+ foreach sleep {2 1 0} {
+ for {set i 0} {$i < $nclients} {incr i} {
+ set stat [catch {eval exec $util_path/db_recover \
+ $hargs($i)} result]
+ error_check_good stat $stat 0
+ #
+ # Need to sleep to make sure recovery's checkpoint
+ # records have different timestamps.
+ tclsleep $sleep
+ }
+ }
+
+ puts "\tRep$tnum.c: Reopen clients and declare one master."
+ for {set i 0} {$i < $nclients} {incr i} {
+ set clenv($i) [eval $cl_envcmd($i) $recargs]
+ error_check_good client_env [is_valid_env $clenv($i)] TRUE
+ }
+ error_check_good master0 [$clenv(0) rep_start -master] 0
+
+ puts "\tRep$tnum.d: Sync up with other clients."
+ while { 1 } {
+ set nproced 0
+
+ for {set i 0} {$i < $nclients} {incr i} {
+ incr nproced [replprocessqueue $clenv($i) $id($i)]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+ puts "\tRep$tnum.e: Verify client logs match."
+ set i 0
+ error_check_good cl$i.close [$clenv($i) close] 0
+ set stat [catch {eval exec $util_path/db_printlog \
+ $hargs($i) >& $clientdir($i)/prlog} result]
+ #
+ # Note we start the loop at 1 here and compare against client0
+ # which became the master.
+ #
+ for {set i 1} {$i < $nclients} {incr i} {
+ error_check_good cl$i.close [$clenv($i) close] 0
+ fileremove -f $clientdir($i)/prlog
+ set stat [catch {eval exec $util_path/db_printlog \
+ $hargs($i) >> $clientdir($i)/prlog} result]
+ error_check_good stat_prlog $stat 0
+ error_check_good log_cmp(0,$i) \
+ [filecmp $clientdir(0)/prlog $clientdir($i)/prlog] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+ set testdir $orig_tdir
+ return
+}
+
diff --git a/db-4.8.30/test/rep020.tcl b/db-4.8.30/test/rep020.tcl
new file mode 100644
index 0000000..c8a79c7
--- /dev/null
+++ b/db-4.8.30/test/rep020.tcl
@@ -0,0 +1,331 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep020
+# TEST Replication elections - test election generation numbers.
+# TEST
+
+proc rep020 { method args } {
+ global rand_init
+ global databases_in_memory
+ global repfiles_in_memory
+
+ source ./include.tcl
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+ set tnum "020"
+
+ # Run for btree only.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: Skipping for method $method."
+ return
+ }
+
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+ set nclients 5
+ set logsets [create_logsets [expr $nclients + 1]]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases"
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ foreach l $logsets {
+ puts "Rep$tnum ($method): Election generation test $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "Rep$tnum: Client $i logs are\
+ [lindex $l [expr $i + 1]]"
+ }
+ rep020_sub $method $nclients $tnum $l $args
+ }
+}
+
+proc rep020_sub { method nclients tnum logset largs } {
+ source ./include.tcl
+ global errorInfo
+ global databases_in_memory
+ global mixed_mode_logging
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ set m_logtype [lindex $logset 0]
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ set c_logtype($i) [lindex $logset [expr $i + 1]]
+ set c_logargs($i) [adjust_logargs $c_logtype($i)]
+ set c_txnargs($i) [adjust_txnargs $c_logtype($i)]
+ }
+
+ # Open a master.
+ set envlist {}
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 $verbargs \
+ -event rep_event $repmemargs \
+ -home $masterdir $m_txnargs $m_logargs -rep_master \
+ -errpfx MASTER -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+ lappend envlist "$masterenv 1"
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env_noerr -create -event rep_event \
+ $verbargs -home $clientdir($i) $repmemargs \
+ $c_txnargs($i) $c_logargs($i) \
+ -rep_client -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i)]
+ lappend envlist "$clientenv($i) $envid"
+ }
+
+ # Run a modified test001 in the master.
+ process_msgs $envlist
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set niter 10
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ # Check that databases are in-memory or on-disk as expected.
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+ check_db_location $masterenv
+ for { set i 0 } { $i < $nclients } { incr i } {
+ check_db_location $clientenv($i)
+ }
+
+ # Close master.
+ error_check_good masterenv_close [$masterenv close] 0
+ set envlist [lreplace $envlist 0 0]
+
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ replclear [expr $i + 2]
+ set err_cmd($i) "none"
+ set pri($i) 10
+ set crash($i) 0
+ if { $rep_verbose == 1 } {
+ $clientenv($i) errpfx CLIENT$i
+ $clientenv($i) verbose $verbose_type on
+ $clientenv($i) errfile /dev/stderr
+ set env_cmd($i) [concat $env_cmd($i) \
+ "-errpfx CLIENT$i -errfile /dev/stderr"]
+ }
+ }
+
+ set msg "Rep$tnum.b"
+ puts "\t$msg: Run elections to increment egen."
+
+ set nelect 2
+ set nsites $nclients
+ set nvotes $nclients
+ for { set j 0 } { $j < $nelect } { incr j } {
+ # Pick winner and elector randomly.
+ set winner [berkdb random_int 0 [expr $nclients - 1]]
+ setpriority pri $nclients $winner
+ set elector [berkdb random_int 0 [expr $nclients - 1]]
+ run_election env_cmd envlist err_cmd pri crash $qdir \
+ $msg $elector $nsites $nvotes $nclients $winner 1 $dbname
+ }
+ process_msgs $envlist
+
+ set msg "Rep$tnum.c"
+ puts "\t$msg: Updating egen when getting an old vote."
+
+ #
+ # Find the last client and save the election generation number.
+ # Close the last client and adjust the list of envs to process.
+ #
+ set i [expr $nclients - 1]
+ set last [lindex $envlist end]
+ set clientenv($i) [lindex $last 0]
+ set egen($i) \
+ [stat_field $clientenv($i) rep_stat "Election generation number"]
+ error_check_good clientenv_close($i) [$clientenv($i) close] 0
+ set envlist [lreplace $envlist end end]
+
+ # Run a few more elections while the last client is closed.
+ # Make sure we don't pick the closed client as the winner,
+ # and require votes from one fewer site.
+ #
+ set orig_nvotes $nvotes
+ set orig_nclients $nclients
+ set nvotes [expr $orig_nvotes - 1]
+ set nclients [expr $orig_nclients - 1]
+ for { set j 0 } { $j < $nelect } { incr j } {
+ set winner [berkdb random_int 0 [expr $nclients - 1]]
+ setpriority pri $nclients $winner
+ set elector [berkdb random_int 0 [expr $nclients - 1]]
+ run_election env_cmd envlist err_cmd pri crash $qdir \
+ $msg $elector $nsites $nvotes $nclients $winner 1 $dbname
+ }
+ process_msgs $envlist
+ #
+ # Verify that the last client's election generation number has
+ # changed, and that it matches the other clients.
+ #
+ set pair [lindex $envlist 0]
+ set clenv [lindex $pair 0]
+ set clegen [stat_field \
+ $clenv rep_stat "Election generation number"]
+
+ # Reopen last client's env. Do not run recovery, but do
+ # process messages to get the egen updated.
+ replclear $envid
+ set clientenv($i) [eval $env_cmd($i)]
+ lappend envlist "$clientenv($i) $envid"
+ error_check_good client_reopen [is_valid_env $clientenv($i)] TRUE
+ process_msgs $envlist
+
+ set newegen($i) \
+ [stat_field $clientenv($i) rep_stat "Election generation number"]
+ error_check_bad egen_changed $newegen($i) $egen($i)
+ error_check_good egen_changed1 $newegen($i) $clegen
+
+ set msg "Rep$tnum.d"
+ puts "\t$msg: New client starts election."
+ #
+ # Run another election, this time called by the last client.
+ # This should succeed because the last client has already
+ # caught up to the others for egen.
+ #
+ set winner 2
+ set nvotes $orig_nvotes
+ set nclients $orig_nclients
+ set elector [expr $nclients - 1]
+ setpriority pri $nclients $winner
+ run_election env_cmd envlist err_cmd pri crash $qdir \
+ $msg $elector $nsites $nvotes $nclients $winner 0 $dbname
+
+ set newegen($i) \
+ [stat_field $clientenv($i) rep_stat "Election generation number"]
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ set clientenv($i) [lindex $pair 0]
+ set egen($i) [stat_field \
+ $clientenv($i) rep_stat "Election generation number"]
+ }
+ error_check_good egen_catchup $egen(4) $egen(3)
+
+ # Skip this part of the test for mixed-mode logging,
+ # since we can't recover with in-memory logs.
+ if { $mixed_mode_logging == 0 } {
+ set msg "Rep$tnum.e"
+ puts "\t$msg: Election generation set as expected after recovery."
+ # Note all client egens. Close, recover, process messages,
+ # and check that egens are unchanged.
+ set big_e [big_endian]
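+		#
+		# The read below assumes the first 4 bytes of the on-disk
+		# __db.rep.egen file hold the election generation as a
+		# native-endian 32-bit integer, hence the I/i scan formats.
+		#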
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ set clientenv($i) [lindex $pair 0]
+ # Can only get egen file if repfiles on-disk.
+ if { $repfiles_in_memory == 0 } {
+ set fid [open $clientdir($i)/__db.rep.egen r]
+ fconfigure $fid -translation binary
+ set data [read $fid 4]
+ if { $big_e } {
+ binary scan $data I egen($i)
+ } else {
+ binary scan $data i egen($i)
+ }
+ binary scan $data c val
+ close $fid
+ }
+ $clientenv($i) log_flush
+ error_check_good \
+ clientenv_close($i) [$clientenv($i) close] 0
+ set clientenv($i) [eval $env_cmd($i) -recover]
+ set envlist [lreplace \
+ $envlist $i $i "$clientenv($i) [expr $i + 2]"]
+ }
+ process_msgs $envlist
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ set newegen($i) [stat_field $clientenv($i) \
+ rep_stat "Election generation number"]
+ if { $repfiles_in_memory == 0 } {
+ error_check_good egen_recovery $egen($i) \
+ $newegen($i)
+ } else {
+ # For rep in-memory, egen expected to start
+ # over at 1 after close/reopen environment.
+ error_check_good egen_recovery $newegen($i) 1
+ }
+ }
+
+ # Run an election. Now the egens should go forward.
+ set winner [berkdb random_int 0 [expr $nclients - 1]]
+ setpriority pri $nclients $winner
+ set elector [berkdb random_int 0 [expr $nclients - 1]]
+ run_election env_cmd envlist err_cmd pri crash $qdir \
+ $msg $elector $nsites $nvotes $nclients $winner 1 $dbname
+
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ set clientenv($i) [lindex $pair 0]
+ set newegen($i) [stat_field $clientenv($i) \
+ rep_stat "Election generation number"]
+ if { $repfiles_in_memory == 0 } {
+ error_check_good egen_forward \
+ [expr $newegen($i) > $egen($i)] 1
+ } else {
+ # For rep in-memory, egen expected to
+ # increment to 2 after election.
+ error_check_good egen_recovery $newegen($i) 2
+ }
+ }
+ }
+
+ foreach pair $envlist {
+ set cenv [lindex $pair 0]
+ error_check_good cenv_close [$cenv close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
diff --git a/db-4.8.30/test/rep021.tcl b/db-4.8.30/test/rep021.tcl
new file mode 100644
index 0000000..f17eb1a
--- /dev/null
+++ b/db-4.8.30/test/rep021.tcl
@@ -0,0 +1,330 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep021
+# TEST Replication and multiple environments.
+# TEST Run similar tests in separate environments, making sure
+# TEST that some data overlaps. Then, "move" one client env
+# TEST from one replication group to another and make sure that
+# TEST we do not get divergent logs. We either match the first
+# TEST record and end up with identical logs or we get an error.
+# TEST Verify all client logs are identical if successful.
+#
+proc rep021 { method { nclients 3 } { tnum "021" } args } {
+
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ # This test depends on copying logs, so can't be run with
+ # in-memory logging.
+ global mixed_mode_logging
+ if { $mixed_mode_logging > 0 } {
+ puts "Rep$tnum: Skipping for mixed-mode logging."
+ return
+ }
+
+ # This test closes its envs, so it's not appropriate for
+ # testing of in-memory named databases.
+ global databases_in_memory
+ if { $databases_in_memory } {
+ puts "Rep$tnum: Skipping for in-memory databases."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets [expr $nclients + 1]]
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r): Replication\
+ and $nclients recovered clients in sync $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "Rep$tnum: Client $i logs are\
+ [lindex $l [expr $i + 1]]"
+ }
+ rep021_sub $method $nclients $tnum $l $r $args
+ }
+ }
+}
+
+proc rep021_sub { method nclients tnum logset recargs largs } {
+ global testdir
+ global util_path
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set orig_tdir $testdir
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set niter 100
+ set offset 5
+ set masterdir $testdir/MASTERDIR
+ set masterdir2 $testdir/MASTERDIR.NEW
+ file mkdir $masterdir
+ file mkdir $masterdir2
+
+ set m_logtype [lindex $logset 0]
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+
+ # We want to run the test 3 times in 2 separate repl envs.
+	# This is a little tricky because the Tcl replication framework
+	# assumes a single replication group, so we have to manage and
+	# clear the repl message queues ourselves when switching between
+	# the two groups.
+	# To accomplish this we run entirely in the 2nd group first.
+	# We set it up and then close all its envs.  Then we run
+	# to the 1st group and set it up.  Finally we add a client
+	# from the 2nd group into the existing 1st group.
+	# Although the work happens in separate pieces, the overall
+	# plan is:
+ #
+ # 1. First add divergent data to database:
+ # RepGrp1: Add niter data from 0 to database.
+ # RepGrp2: Add niter data from offset to database.
+ # This gives us overlapping data in the databases, but their
+ # additions will be at different offsets in the log files.
+ #
+ # 2. Add identical data to both databases.
+ # RepGrp1: Add niter data from niter + offset to database.
+ # RepGrp2: Add niter data from niter + offset to database.
+ # This gives us identical data in the databases and logs.
+ #
+ # 3. Again add divergent data to databases.
+ # RepGrp1: Add niter data from niter*2+offset to database.
+ # RepGrp2: Add niter data from niter*2+offset*2 to database.
+ # This gives us overlapping data in the databases, but their
+ # additions will be at different offsets in the log files.
+ #
+ # 4. Add a client from one group to the other. Then try
+ # to sync up that client. We should get a failure with
+ # one of the non-matching error messages:
+ # "Too few log files to sync with master"
+ # REP_JOIN_FAILURE
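+ # The checks at the end of rep021_sub verify both outcomes: on a
+ # join failure the moved client's log must be unchanged; on a
+ # successful sync it must match the new master's log.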
+
+ # Open a 2nd master. Make all the 2nd env ids >= 10.
+ # For the 2nd group, just have 1 master and 1 client.
+ repladd 10
+ set ma2_envcmd "berkdb_env_noerr -create $m_txnargs $verbargs \
+ $m_logargs -home $masterdir2 $repmemargs \
+ -rep_master -rep_transport \[list 10 replsend\]"
+ set menv2 [eval $ma2_envcmd $recargs]
+
+ set clientdir2 $testdir/CLIENTDIR.NEW
+ file mkdir $clientdir2
+ set id2 11
+ set c_logtype($id2) [lindex $logset 1]
+ set c_logargs($id2) [adjust_logargs $c_logtype($id2)]
+ set c_txnargs($id2) [adjust_txnargs $c_logtype($id2)]
+
+ set id2 11
+ repladd $id2
+ set cl2_envcmd "berkdb_env_noerr -create $c_txnargs($id2) $verbargs \
+ $c_logargs($id2) -home $clientdir2 $repmemargs \
+ -rep_client -rep_transport \[list $id2 replsend\]"
+ set clenv2 [eval $cl2_envcmd $recargs]
+
+ set testfile "test$tnum.db"
+ set omethod [convert_method $method]
+
+ set masterdb2 [eval {berkdb_open_noerr -env $menv2 -auto_commit \
+ -create -mode 0644} $largs $omethod $testfile]
+ error_check_good dbopen [is_valid_db $masterdb2] TRUE
+
+ #
+ # Process startup messages
+ #
+ set env2list {}
+ lappend env2list "$menv2 10"
+ lappend env2list "$clenv2 $id2"
+ process_msgs $env2list
+
+ #
+ # Set up the three runs of rep_test. We need the starting
+ # point for each phase of the test for each group.
+ #
+ set e1phase1 0
+ set e2phase1 $offset
+ set e1phase2 [expr $niter + $offset]
+ set e2phase2 $e1phase2
+ set e1phase3 [expr $e1phase2 + $niter]
+ set e2phase3 [expr $e2phase2 + $niter + $offset]
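+ # For example, with the defaults (niter 100, offset 5) the phase
+ # starting points are 0, 105 and 205 for group 1, and 5, 105 and
+ # 210 for group 2: phase 2 is identical, phases 1 and 3 diverge.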
+
+ puts "\tRep$tnum.a: Running rep_test in 2nd replicated env."
+ eval rep_test $method $menv2 $masterdb2 $niter $e2phase1 1 1 $largs
+ eval rep_test $method $menv2 $masterdb2 $niter $e2phase2 1 1 $largs
+ eval rep_test $method $menv2 $masterdb2 $niter $e2phase3 1 1 $largs
+ error_check_good mdb_cl [$masterdb2 close] 0
+ process_msgs $env2list
+
+ puts "\tRep$tnum.b: Close 2nd replicated env. Open primary."
+ error_check_good mdb_cl [$clenv2 close] 0
+ error_check_good mdb_cl [$menv2 close] 0
+ replclose $testdir/MSGQUEUEDIR
+
+ #
+ # Run recovery in client now to blow away region files so
+ # that this client comes in as a "new" client and announces itself.
+ #
+ set stat [catch {eval exec $util_path/db_recover -h $clientdir2} result]
+ error_check_good stat $stat 0
+
+ #
+ # Now we've run in the 2nd env and everything we need is set up
+ # and in place there. Next, run the test in the 1st env and
+ # then try to add in the client.
+ #
+ replsetup $testdir/MSGQUEUEDIR
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $verbargs \
+ $m_logargs -home $masterdir $repmemargs \
+ -rep_master -rep_transport \[list 1 replsend\]"
+ set menv [eval $ma_envcmd $recargs]
+
+ for {set i 0} {$i < $nclients} {incr i} {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ set c_logtype($i) [lindex $logset [expr $i + 1]]
+ set c_logargs($i) [adjust_logargs $c_logtype($i)]
+ set c_txnargs($i) [adjust_txnargs $c_logtype($i)]
+ set id($i) [expr 2 + $i]
+ repladd $id($i)
+ set cl_envcmd($i) "berkdb_env_noerr -create $c_txnargs($i) \
+ $c_logargs($i) -home $clientdir($i) $repmemargs \
+ $verbargs \
+ -rep_client -rep_transport \[list $id($i) replsend\]"
+ set clenv($i) [eval $cl_envcmd($i) $recargs]
+ }
+
+ set masterdb [eval {berkdb_open_noerr -env $menv -auto_commit \
+ -create -mode 0644} $largs $omethod $testfile]
+ error_check_good dbopen [is_valid_db $masterdb] TRUE
+
+ # Bring the clients online by processing the startup messages.
+ set envlist {}
+ lappend envlist "$menv 1"
+ for { set i 0 } { $i < $nclients } { incr i } {
+ lappend envlist "$clenv($i) $id($i)"
+ }
+ process_msgs $envlist
+
+ # Run a modified test001 in the master (and update clients).
+ puts "\tRep$tnum.c: Running rep_test in primary replicated env."
+ eval rep_test $method $menv $masterdb $niter $e1phase1 1 1 $largs
+ eval rep_test $method $menv $masterdb $niter $e1phase2 1 1 $largs
+ eval rep_test $method $menv $masterdb $niter $e1phase3 1 1 $largs
+ error_check_good mdb_cl [$masterdb close] 0
+ # Process any close messages.
+ process_msgs $envlist
+
+ puts "\tRep$tnum.d: Add unrelated client into replication group."
+ set i $nclients
+ set orig $nclients
+ set nclients [expr $nclients + 1]
+
+ set clientdir($i) $clientdir2
+ set id($i) [expr 2 + $i]
+ repladd $id($i)
+ set cl_envcmd($i) "berkdb_env_noerr -create -txn nosync \
+ -home $clientdir($i) $verbargs $repmemargs \
+ -rep_client -rep_transport \[list $id($i) replsend\]"
+ set clenv($i) [eval $cl_envcmd($i) $recargs]
+ #
+ # We'll only catch an error if we turn on no-autoinit.
+ # Otherwise, the system will throw away everything on the
+ # client and resync.
+ #
+ $clenv($i) rep_config {noautoinit on}
+
+ lappend envlist "$clenv($i) $id($i)"
+
+ fileremove -f $clientdir2/prlog.orig
+ set stat [catch {eval exec $util_path/db_printlog \
+ -h $clientdir2 >> $clientdir2/prlog.orig} result]
+
+ set err 0
+ process_msgs $envlist 0 NONE err
+
+ puts "\tRep$tnum.e: Close all envs and run recovery in clients."
+ error_check_good menv_cl [$menv close] 0
+ for {set i 0} {$i < $nclients} {incr i} {
+ error_check_good cl$i.close [$clenv($i) close] 0
+ set hargs($i) "-h $clientdir($i)"
+ }
+ set i [expr $nclients - 1]
+ fileremove -f $clientdir($i)/prlog
+ set stat [catch {eval exec $util_path/db_printlog \
+ -h $clientdir($i) >> $clientdir($i)/prlog} result]
+
+ # If we got an error, then the log should match the original
+ # and the error message should tell us the client was never
+ # part of this environment.
+ #
+ if { $err != 0 } {
+ puts "\tRep$tnum.f: Verify client log matches original."
+ error_check_good log_cmp(orig,$i) \
+ [filecmp $clientdir($i)/prlog.orig $clientdir($i)/prlog] 0
+ puts "\tRep$tnum.g: Verify client error."
+ error_check_good errchk [is_substr $err \
+ "REP_JOIN_FAILURE"] 1
+ } else {
+ puts "\tRep$tnum.f: Verify client log doesn't match original."
+ error_check_good log_cmp(orig,$i) \
+ [filecmp $clientdir($i)/prlog.orig $clientdir($i)/prlog] 1
+ puts "\tRep$tnum.g: Verify new client log matches master."
+ set stat [catch {eval exec $util_path/db_printlog \
+ -h $masterdir >& $masterdir/prlog} result]
+ fileremove -f $clientdir($i)/prlog
+ set stat [catch {eval exec $util_path/db_printlog \
+ -h $clientdir($i) >> $clientdir($i)/prlog} result]
+ error_check_good stat_prlog $stat 0
+ error_check_good log_cmp(master,$i) \
+ [filecmp $masterdir/prlog $clientdir($i)/prlog] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+ set testdir $orig_tdir
+ return
+}
+
diff --git a/db-4.8.30/test/rep022.tcl b/db-4.8.30/test/rep022.tcl
new file mode 100644
index 0000000..048f097
--- /dev/null
+++ b/db-4.8.30/test/rep022.tcl
@@ -0,0 +1,316 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep022
+# TEST Replication elections - test election generation numbers
+# TEST during simulated network partition.
+# TEST
+proc rep022 { method args } {
+
+ source ./include.tcl
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+ global rand_init
+ global mixed_mode_logging
+ global databases_in_memory
+ global repfiles_in_memory
+
+ set tnum "022"
+
+ # Run for btree only.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: Skipping for method $method."
+ return
+ }
+
+ if { $mixed_mode_logging > 0 } {
+ puts "Rep$tnum: Skipping for mixed-mode logging."
+ return
+ }
+
+ if { $databases_in_memory > 0 } {
+ puts "Rep$tnum: Skipping for in-memory databases."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+ set nclients 5
+ set logsets [create_logsets [expr $nclients + 1]]
+ foreach l $logsets {
+ puts "Rep$tnum ($method): Election generation test\
+ with simulated network partition $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "Rep$tnum: Client $i logs are\
+ [lindex $l [expr $i + 1]]"
+ }
+ rep022_sub $method $nclients $tnum $l $args
+ }
+}
+
+proc rep022_sub { method nclients tnum logset largs } {
+ source ./include.tcl
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ set m_logtype [lindex $logset 0]
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ set c_logtype($i) [lindex $logset [expr $i + 1]]
+ set c_logargs($i) [adjust_logargs $c_logtype($i)]
+ set c_txnargs($i) [adjust_txnargs $c_logtype($i)]
+ }
+
+ # Open a master.
+ set envlist {}
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 $verbargs \
+ -event rep_event $repmemargs \
+ -home $masterdir $m_txnargs $m_logargs -rep_master \
+ -errpfx MASTER -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+ lappend envlist "$masterenv 1"
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT.$i -event rep_event $repmemargs \
+ -home $clientdir($i) $c_txnargs($i) $c_logargs($i) \
+ -rep_client -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i)]
+ lappend envlist "$clientenv($i) $envid"
+ }
+
+ # Bring the clients online by processing the startup messages.
+ process_msgs $envlist
+
+ # Run a modified test001 in the master.
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set niter 10
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+ error_check_good masterenv_close [$masterenv close] 0
+ set envlist [lreplace $envlist 0 0]
+
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ replclear [expr $i + 2]
+ set err_cmd($i) "none"
+ set pri($i) 10
+ set crash($i) 0
+ if { $rep_verbose == 1 } {
+ $clientenv($i) errpfx CLIENT$i
+ $clientenv($i) verbose $verbose_type on
+ $clientenv($i) errfile /dev/stderr
+ set env_cmd($i) [concat $env_cmd($i) \
+ "-errpfx CLIENT$i -errfile /dev/stderr"]
+ }
+ }
+
+ set msg "Rep$tnum.b"
+ puts "\t$msg: Run election for clients 0,1,2."
+ #
+ # Run an election with clients 0, 1, and 2.
+ # Make client 0 be the winner, and let it stay master.
+ #
+ set origlist $envlist
+ set orignclients $nclients
+ set envlist [lrange $origlist 0 2]
+ set nclients 3
+ set nsites 3
+ set nvotes 3
+ set winner 0
+ setpriority pri $nclients $winner
+ set elector [berkdb random_int 0 [expr $nclients - 1]]
+ run_election env_cmd envlist err_cmd pri crash \
+ $qdir $msg $elector $nsites $nvotes $nclients $winner 0 test.db
+
+ set msg "Rep$tnum.c"
+ puts "\t$msg: Close and reopen client 2 with recovery."
+ #
+ # Now close and reopen client 2 with recovery. Update the
+ # list of all client envs with the new information.
+ #
+ replclear 5
+ replclear 6
+ error_check_good flush [$clientenv(2) log_flush] 0
+ error_check_good clientenv_close(2) [$clientenv(2) close] 0
+ set clientenv(2) [eval $env_cmd(2) -recover]
+ set origlist [lreplace $origlist 2 2 "$clientenv(2) 4"]
+
+ # Get last LSN for client 2.
+ set logc [$clientenv(2) log_cursor]
+ error_check_good logc \
+ [is_valid_logc $logc $clientenv(2)] TRUE
+ set lastlsn2 [lindex [lindex [$logc get -last] 0] 1]
+ error_check_good close_cursor [$logc close] 0
+
+ set msg "Rep$tnum.d"
+ puts "\t$msg: Close and reopen client 4 with recovery."
+ #
+ # This forces the last LSN for client 4 up to the last
+ # LSN for client 2 so client 4 can be elected.
+ #
+ set lastlsn4 0
+ while { $lastlsn4 < $lastlsn2 } {
+ error_check_good clientenv_close(4) [$clientenv(4) close] 0
+ set clientenv(4) [eval $env_cmd(4) -recover]
+ error_check_good flush [$clientenv(4) log_flush] 0
+ set origlist [lreplace $origlist 4 4 "$clientenv(4) 6"]
+ set logc [$clientenv(4) log_cursor]
+ error_check_good logc \
+ [is_valid_logc $logc $clientenv(4)] TRUE
+ set lastlsn4 [lindex [lindex [$logc get -last] 0] 1]
+ error_check_good close_cursor [$logc close] 0
+ }
+
+ set msg "Rep$tnum.e"
+ puts "\t$msg: Run election for clients 2,3,4."
+ #
+ # Run an election with clients 2, 3, 4.
+ # Make last client be the winner, and let it stay master.
+ # Need to process messages before running election so
+ # that clients 2 and 4 update to the right gen with
+ # client 3.
+ #
+ set envlist [lrange $origlist 2 4]
+ process_msgs $envlist
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ set clientenv($i) [lindex $pair 0]
+ set egen($i) [stat_field \
+ $clientenv($i) rep_stat "Election generation number"]
+ }
+ set winner 4
+ setpriority pri $nclients $winner 2
+ set elector [berkdb random_int 2 4]
+ run_election env_cmd envlist err_cmd pri crash \
+ $qdir $msg $elector $nsites $nvotes $nclients $winner 0 test.db
+
+ # Note egens for all the clients.
+ set envlist $origlist
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ set clientenv($i) [lindex $pair 0]
+ set egen($i) [stat_field \
+ $clientenv($i) rep_stat "Election generation number"]
+ }
+
+ # Have client 4 (currently a master) run an operation.
+ eval rep_test $method $clientenv(4) NULL $niter 0 0 0 $largs
+
+ # Check that clients 0 and 4 get DUPMASTER messages and
+ # restart them as clients.
+ #
+ puts "\tRep$tnum.f: Check for DUPMASTER"
+ set envlist0 [lrange $envlist 0 0]
+ process_msgs $envlist0 0 dup err
+ error_check_good is_dupmaster0 [lindex $dup 0] 1
+ error_check_good downgrade0 [$clientenv(0) rep_start -client] 0
+
+ set envlist4 [lrange $envlist 4 4]
+ process_msgs $envlist4 0 dup err
+ error_check_good is_dupmaster4 [lindex $dup 0] 1
+ error_check_good downgrade4 [$clientenv(4) rep_start -client] 0
+
+ # All DUPMASTER messages are now gone.
+ # We might get residual errors however because client 4
+ # responded as a master to client 0 and then became a
+ # client immediately. Therefore client 4 might get some
+ # "master-only" records and return EINVAL. We want to
+ # ignore those and process records until calm is restored.
+ set err 1
+ while { $err == 1 } {
+ process_msgs $envlist 0 dup err
+ error_check_good no_dupmaster $dup 0
+ }
+
+ # Check LSNs before new election.
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ set logc [$clientenv($i) log_cursor]
+ error_check_good logc \
+ [is_valid_logc $logc $clientenv($i)] TRUE
+ set lastlsn [lindex [lindex [$logc get -last] 0] 1]
+ error_check_good cursor_close [$logc close] 0
+ }
+
+ set msg "Rep$tnum.g"
+ puts "\t$msg: Run election for all clients after DUPMASTER."
+
+ # Call a new election with all participants. Make 4 the
+ # winner, since it should have a high enough LSN to win.
+ set nclients $orignclients
+ set nsites $nclients
+ set nvotes $nclients
+ set winner 4
+ setpriority pri $nclients $winner
+ set elector [berkdb random_int 0 [expr $nclients - 1]]
+ run_election env_cmd envlist err_cmd pri crash \
+ $qdir $msg $elector $nsites $nvotes $nclients $winner 0 test.db
+
+ # Pull out new egens.
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ set clientenv($i) [lindex $pair 0]
+ set newegen($i) [stat_field \
+ $clientenv($i) rep_stat "Election generation number"]
+ }
+
+ # Egen numbers should all be the same now, and all greater than
+ # they were before the election.
+ set currentegen $newegen(0)
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set egen_diff [expr $newegen($i) - $egen($i)]
+ error_check_good egen_increased [expr $egen_diff > 0] 1
+ error_check_good newegens_match $currentegen $newegen($i)
+ }
+
+ # Clean up.
+ foreach pair $envlist {
+ set cenv [lindex $pair 0]
+ error_check_good cenv_close [$cenv close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
+
diff --git a/db-4.8.30/test/rep023.tcl b/db-4.8.30/test/rep023.tcl
new file mode 100644
index 0000000..0cc88e8
--- /dev/null
+++ b/db-4.8.30/test/rep023.tcl
@@ -0,0 +1,205 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep023
+# TEST Replication using two master handles.
+# TEST
+# TEST Open two handles on one master env. Create two
+# TEST databases, one through each master handle. Process
+# TEST all messages through the first master handle. Make
+# TEST sure changes made through both handles are picked
+# TEST up properly.
+#
+proc rep023 { method { niter 10 } { tnum "023" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ # We can't open an env twice on HP-UX, so just skip the
+ # whole test, since two handles on one env is its core.
+ if { $is_hp_test == 1 } {
+ puts "Rep$tnum: Skipping for HP-UX."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery, and
+ # with and without -rep_start.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ foreach startopt { 0 1 } {
+ if { $startopt == 1 } {
+ set startmsg "with rep_start"
+ } else {
+ set startmsg ""
+ }
+ puts "Rep$tnum ($method $r $startmsg):\
+ Replication with two master handles $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep023_sub $method \
+ $niter $tnum $l $r $startopt $args
+ }
+ }
+ }
+}
+
+proc rep023_sub { method niter tnum logset recargs startopt largs } {
+ global testdir
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open 1st master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \
+ $verbargs -errpfx MASTER -home $masterdir $repmemargs \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv1 [eval $ma_envcmd $recargs -rep_master]
+
+ # Open 2nd handle on master. The master envs will share
+ # the same envid.
+ set masterenv2 [eval $ma_envcmd]
+ if { $startopt == 1 } {
+ error_check_good rep_start [$masterenv2 rep_start -master] 0
+ }
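+ # Note that the 2nd handle is opened without -rep_master; when
+ # startopt is 1, the explicit rep_start -master call above
+ # exercises promoting an already-open handle.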
+
+ # Open a client.
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $repmemargs \
+ $c_logargs $verbargs -errpfx CLIENT -home $clientdir \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ # Process messages on the first masterenv handle, not the second.
+ set envlist "{$masterenv1 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Set up databases in-memory or on-disk.
+ if { $databases_in_memory } {
+ set testfile1 { "" m1$tnum.db }
+ set testfile2 { "" m2$tnum.db }
+ } else {
+ set testfile1 "m1$tnum.db"
+ set testfile2 "m2$tnum.db"
+ }
+
+ puts "\tRep$tnum.a: Create database using 1st master handle."
+ # Create a database using the 1st master.
+ set omethod [convert_method $method]
+ set db1 [eval {berkdb_open_noerr -env $masterenv1 -auto_commit \
+ -create -mode 0644} $largs $omethod $testfile1]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ puts "\tRep$tnum.b: Create database using 2nd master handle."
+ # Create a different database using the 2nd master.
+ set db2 [eval {berkdb_open_noerr -env $masterenv2 -auto_commit \
+ -create -mode 0644} $largs $omethod $testfile2]
+ error_check_good dbopen [is_valid_db $db2] TRUE
+
+ puts "\tRep$tnum.c: Process messages."
+ # Process messages.
+ process_msgs $envlist
+
+ # Check that databases are in-memory or on-disk as expected.
+ check_db_location $masterenv1 $testfile1
+ check_db_location $masterenv2 $testfile1
+ check_db_location $masterenv1 $testfile2
+ check_db_location $masterenv2 $testfile2
+
+ puts "\tRep$tnum.d: Run rep_test in 1st master; process messages."
+ eval rep_test $method $masterenv1 $db1 $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ puts "\tRep$tnum.e: Run rep_test in 2nd master; process messages."
+ eval rep_test $method $masterenv2 $db2 $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ # Contents of the two databases should match.
+ error_check_good db_compare [db_compare \
+ $db1 $db2 $masterdir/$testfile1 $masterdir/$testfile2] 0
+
+ puts "\tRep$tnum.f: Close 2nd master."
+ error_check_good db2 [$db2 close] 0
+ error_check_good master2_close [$masterenv2 close] 0
+
+ puts "\tRep$tnum.g: Run test in master again."
+ eval rep_test $method $masterenv1 $db1 $niter $niter 0 0 $largs
+ process_msgs $envlist
+
+ puts "\tRep$tnum.h: Closing"
+ error_check_good db1 [$db1 close] 0
+ error_check_good env0_close [$masterenv1 close] 0
+ error_check_good env2_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+ return
+}
diff --git a/db-4.8.30/test/rep024.tcl b/db-4.8.30/test/rep024.tcl
new file mode 100644
index 0000000..178978e
--- /dev/null
+++ b/db-4.8.30/test/rep024.tcl
@@ -0,0 +1,236 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep024
+# TEST Replication page allocation / verify test
+# TEST
+# TEST Start a master (site 1) and a client (site 2). Master
+# TEST closes (simulating a crash). Site 2 becomes the master
+# TEST and site 1 comes back up as a client. Verify database.
+
+proc rep024 { method { niter 1000 } { tnum "024" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ global fixed_len
+ set orig_fixed_len $fixed_len
+ set fixed_len 448
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # This test is not appropriate for in-memory databases.
+ if { $databases_in_memory } {
+ puts "Skipping rep$tnum for named in-memory databases."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run all tests with and without recovery.
+ set envargs ""
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r): \
+ Replication page allocation/verify test $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep024_sub $method $niter $tnum $envargs $l $r $args
+ }
+ }
+ set fixed_len $orig_fixed_len
+ return
+}
+
+proc rep024_sub { method niter tnum envargs logset recargs largs } {
+ source ./include.tcl
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync. This test requires -txn, so
+ # we only have to adjust the logargs.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set env_cmd(1) "berkdb_env_noerr -create $repmemargs \
+ -log_max 1000000 $envargs $recargs -home $masterdir \
+ -errpfx MASTER $verbargs -txn $m_logargs \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(1) -rep_master]
+
+ # Open a client
+ repladd 2
+ set env_cmd(2) "berkdb_env_noerr -create $repmemargs \
+ -log_max 1000000 $envargs $recargs -home $clientdir \
+ -errpfx CLIENT $verbargs -txn $c_logargs \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $env_cmd(2) -rep_client]
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.a: Add data to master, update client."
+ #
+ # This test uses a small page size and a large fixed_len
+ # so it is easy to force a page allocation.
+ set key [expr $niter + 1]
+ set data A
+ set pagesize 512
+ if { [is_fixed_length $method] == 1 } {
+ set bigdata [repeat $data [expr $pagesize / 2]]
+ } else {
+ set bigdata [repeat $data [expr 2 * $pagesize]]
+ }
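+ # With pagesize 512, bigdata is 256 bytes of "A" for fixed-length
+ # methods and 1024 bytes otherwise; either way it is large relative
+ # to the page, which is what forces the allocation checked below.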
+
+ set omethod [convert_method $method]
+ set testfile "test$tnum.db"
+ set db [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -pagesize $pagesize -env $masterenv $largs $testfile"]
+ eval rep_test $method $masterenv $db $niter 0 0 0 0 $largs
+ $masterenv txn_checkpoint
+ process_msgs $envlist
+
+ # Close client. Force a page allocation on the master.
+ # An overflow page (or big page, for hash) will do the job.
+ #
+ puts "\tRep$tnum.b: Close client, force page allocation on master."
+ error_check_good client_close [$clientenv close] 0
+
+ error_check_good client_verify \
+ [verify_dir $clientdir "\tRep$tnum.b: " 0 0 1 0 0] 0
+
+ set pages1 [r24_check_pages $db $method]
+ set txn [$masterenv txn]
+ error_check_good put_bigdata [eval {$db put} \
+ -txn $txn {$key [chop_data $method $bigdata]}] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ # Verify that we have allocated new pages.
+ set pages2 [r24_check_pages $db $method]
+ set newpages [expr $pages2 - $pages1]
+
+ # Close master and discard messages for site 2. Now everybody
+ # is closed and sites 1 and 2 have different contents.
+ puts "\tRep$tnum.c: Close master."
+ error_check_good db_close [$db close] 0
+ error_check_good master_close [$masterenv close] 0
+ if { $newpages <= 0 } {
+ puts "FAIL: no new pages allocated."
+ return
+ }
+ error_check_good master_verify \
+ [verify_dir $masterdir "\tRep$tnum.c: " 0 0 1] 0
+
+ # Run a loop, opening the original client as master and the
+ # original master as client. Test db_verify.
+ foreach option { "no new data" "add new data" } {
+ puts "\tRep$tnum.d: Swap master and client ($option)."
+ set newmasterenv [eval $env_cmd(2) -rep_master]
+ set newclientenv [eval $env_cmd(1) -rep_client]
+ set newmasterdir [$newmasterenv get_home]
+ set newclientdir [$newclientenv get_home]
+ set envlist "{$newmasterenv 2} {$newclientenv 1}"
+ process_msgs $envlist
+ if { $option == "add new data" } {
+ set key [expr $niter + 2]
+ set db [eval "berkdb_open_noerr -create $omethod \
+ -auto_commit -pagesize $pagesize \
+ -env $newmasterenv $largs $testfile"]
+ set pages1 [r24_check_pages $db $method]
+ set txn [$newmasterenv txn]
+ error_check_good put_bigdata [eval {$db put} \
+ -txn $txn {$key [chop_data $method $bigdata]}] 0
+ error_check_good txn_commit [$txn commit] 0
+ set pages2 [r24_check_pages $db $method]
+ set newpages [expr $pages2 - $pages1]
+ error_check_good db_close [$db close] 0
+ process_msgs $envlist
+ }
+ puts "\tRep$tnum.e: Close master and client, run verify."
+ #
+ # verify_dir runs db_verify with its own private environment,
+ # which means any dirty pages still in our environment won't be
+ # noticed. So, make sure there are no dirty pages. Running
+ # checkpoint at the master flushes its cache, and replicating
+ # that checkpoint to the client makes the client flush its
+ # cache.
+ #
+ $newmasterenv txn_checkpoint
+ process_msgs $envlist
+
+ error_check_good newmasterenv_close [$newmasterenv close] 0
+ error_check_good newclientenv_close [$newclientenv close] 0
+ if { $newpages <= 0 } {
+ puts "FAIL: no new pages allocated."
+ return
+ }
+ # This test can leave unreferenced pages on systems without
+ # FTRUNCATE and that's OK, so set unref to 0.
+ error_check_good verify \
+ [verify_dir $newmasterdir "\tRep$tnum.f: " 0 0 1 0 0] 0
+ error_check_good verify \
+ [verify_dir $newclientdir "\tRep$tnum.g: " 0 0 1 0 0] 0
+ }
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc r24_check_pages { db method } {
+ if { [is_hash $method] == 1 } {
+ set pages [stat_field $db stat "Number of big pages"]
+ } elseif { [is_queue $method] == 1 } {
+ set pages [stat_field $db stat "Number of pages"]
+ } else {
+ set pages [stat_field $db stat "Overflow pages"]
+ }
+ return $pages
+}
diff --git a/db-4.8.30/test/rep025.tcl b/db-4.8.30/test/rep025.tcl
new file mode 100644
index 0000000..96dc19e
--- /dev/null
+++ b/db-4.8.30/test/rep025.tcl
@@ -0,0 +1,237 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep025
+# TEST Test of DB_REP_JOIN_FAILURE.
+# TEST
+# TEST One master, one client.
+# TEST Generate several log files.
+# TEST Remove old master log files.
+# TEST Delete client files and restart client.
+# TEST Put one more record to the master. At the next
+# TEST processing of messages, the client should get JOIN_FAILURE.
+# TEST Recover with a hot failover.
+#
+proc rep025 { method { niter 200 } { tnum "025" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ # Run for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ # Skip recovery with in-memory logging - it doesn't make sense.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r): Test of manual\
+ initialization and join failure $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep025_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep025_sub { method niter tnum logset recargs largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
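+ # With pagesize 4096 this makes log_max 32768 bytes (8 pages), so
+ # even a modest workload rolls over into several log files.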
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $repmemargs \
+ $m_logargs -log_max $log_max $verbargs -errpfx MASTER \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $repmemargs \
+ $c_logargs -log_max $log_max $verbargs -errpfx CLIENT \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ # Run a modified test001 in the master (and update client).
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+
+ # Find out what exists on the client. We need to loop until
+ # the first master log file > last client log file.
+ puts "\tRep$tnum.b: Close client."
+ if { $c_logtype != "in-memory" } {
+ set res [eval exec $util_path/db_archive -l -h $clientdir]
+ }
+ set last_client_log [get_logfile $clientenv last]
+ error_check_good client_close [$clientenv close] 0
+
+ set stop 0
+ while { $stop == 0 } {
+ # Run rep_test in the master (don't update client).
+ puts "\tRep$tnum.c: Running rep_test in replicated env."
+ eval rep_test \
+ $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ replclear 2
+
+ puts "\tRep$tnum.d: Run db_archive on master."
+ if { $m_logtype != "in-memory"} {
+ set res [eval exec $util_path/db_archive -d -h $masterdir]
+ }
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > $last_client_log } {
+ set stop 1
+ }
+ }
+
+ puts "\tRep$tnum.e: Clean client and reopen."
+ env_cleanup $clientdir
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ set envlist "{$masterenv 1} {$clientenv 2}"
+
+ # Set initialization to manual.
+ $clientenv rep_config {noautoinit on}
+ process_msgs $envlist 0 NONE err
+ error_check_good error_on_right_env [lindex $err 0] $clientenv
+ error_check_good right_error [is_substr $err DB_REP_JOIN_FAILURE] 1
+
+ # Add records to the master and update client.
+ puts "\tRep$tnum.f: Update master; client should return error."
+ #
+ # Force a log record to create a gap to force rerequest.
+ #
+ $masterenv txn_checkpoint -force
+ process_msgs $envlist 0 NONE err
+ tclsleep 1
+ set entries 100
+ eval rep_test $method $masterenv NULL $entries $start $start 0 $largs
+ incr start $entries
+ process_msgs $envlist 0 NONE err
+ error_check_good error_on_right_env [lindex $err 0] $clientenv
+ error_check_good right_error [is_substr $err DB_REP_JOIN_FAILURE] 1
+
+ # If the master logs and the databases are on-disk, copy from master
+ # to client and restart with recovery. If the logs or databases are
+ # in-memory, we'll have to re-enable internal initialization and
+ # restart the client.
+ if { $m_logtype == "on-disk" && $databases_in_memory == 0 } {
+ puts "\tRep$tnum.g: Hot failover and catastrophic recovery."
+ error_check_good client_close [$clientenv close] 0
+ env_cleanup $clientdir
+ set files [glob $masterdir/log.* $masterdir/*.db]
+ foreach f $files {
+ set filename [file tail $f]
+ file copy -force $f $clientdir/$filename
+ }
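+ # Catastrophic recovery below rebuilds the client environment
+ # from the copied databases and logs, much as a restore from a
+ # hot backup would.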
+ set clientenv [eval $cl_envcmd -recover_fatal -rep_client]
+ } else {
+ puts "\tRep$tnum.g: Restart client forcing internal init."
+ set clientenv [eval $cl_envcmd -rep_client]
+ $clientenv rep_config {noautoinit off}
+ }
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist 0 NONE err
+ error_check_good no_errors1 $err 0
+
+ # Adding another entry should not flush out an error.
+ eval rep_test $method $masterenv NULL $entries $start $start 0 $largs
+ process_msgs $envlist 0 NONE err
+ error_check_good no_errors2 $err 0
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep026.tcl b/db-4.8.30/test/rep026.tcl
new file mode 100644
index 0000000..6bc1259
--- /dev/null
+++ b/db-4.8.30/test/rep026.tcl
@@ -0,0 +1,294 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep026
+# TEST Replication elections - simulate a crash after sending
+# TEST a vote.
+
+proc rep026 { method args } {
+ source ./include.tcl
+
+ global mixed_mode_logging
+ global databases_in_memory
+ global repfiles_in_memory
+
+ set tnum "026"
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for btree only.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: Skipping for method $method."
+ return
+ }
+
+ # This test uses recovery, so mixed-mode testing and in-memory
+ # database testing aren't appropriate.
+ if { $mixed_mode_logging > 0 } {
+ puts "Rep$tnum: Skipping for mixed-mode logging."
+ return
+ }
+ if { $databases_in_memory == 1 } {
+ puts "Rep$tnum: Skipping for in-memory databases."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ global rand_init
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+
+ set nclients 5
+ set logsets [create_logsets [expr $nclients + 1]]
+ foreach l $logsets {
+ puts "Rep$tnum ($method): Election generations -\
+ simulate crash after sending a vote $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "Rep$tnum: Client $i logs are\
+ [lindex $l [expr $i + 1]]"
+ }
+ rep026_sub $method $nclients $tnum $l $args
+ }
+}
+
+proc rep026_sub { method nclients tnum logset largs } {
+ source ./include.tcl
+ global machids
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ set m_logtype [lindex $logset 0]
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ set c_logtype($i) [lindex $logset [expr $i + 1]]
+ set c_logargs($i) [adjust_logargs $c_logtype($i)]
+ set c_txnargs($i) [adjust_txnargs $c_logtype($i)]
+ }
+
+ # Open a master.
+ set envlist {}
+ repladd 1
+ set env_cmd(M) "berkdb_env -create -log_max 1000000 $verbargs \
+ -event rep_event $repmemargs \
+ -home $masterdir $m_txnargs $m_logargs -rep_master \
+ -errpfx MASTER -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+ lappend envlist "$masterenv 1"
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env_noerr -create $verbargs \
+ -event rep_event $repmemargs \
+ -home $clientdir($i) $c_txnargs($i) $c_logargs($i) \
+ -rep_client -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i)]
+ error_check_good \
+ client_env($i) [is_valid_env $clientenv($i)] TRUE
+ lappend envlist "$clientenv($i) $envid"
+ }
+ # Bring the clients online by processing the startup messages.
+ process_msgs $envlist
+
+ # Run a modified test001 in the master.
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set niter 10
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+ error_check_good masterenv_close [$masterenv close] 0
+ set envlist [lreplace $envlist 0 0]
+
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ replclear [expr $i + 2]
+ set err_cmd($i) "none"
+ set crash($i) 0
+ set pri($i) 10
+ if { $rep_verbose == 1 } {
+ $clientenv($i) errpfx CLIENT$i
+ $clientenv($i) verbose $verbose_type on
+ $clientenv($i) errfile /dev/stderr
+ set env_cmd($i) [concat $env_cmd($i) \
+ "-errpfx CLIENT$i -errfile /dev/stderr"]
+ }
+ }
+
+ # In each case we simulate a crash in client C, recover, and
+ # call a second election. We vary the caller of the second
+ # election (C or some other client) and when the election
+ # messages from before the crash are processed - before or
+ # after the second election.
+ #
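+ # Each option below is {second-elector step-letter restore-timing}:
+ # which client calls the second election (1 is the crasher), the
+ # reporting step letter, and whether the saved pre-crash messages
+ # are restored before or after that second election.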
+ foreach option { "1 b before" "2 c before" "1 d after" "2 e after"} {
+ # Elector 1 calls the first election, elector 2
+ # calls the second election.
+ set elector1 1
+ set elector2 [lindex $option 0]
+ set let [lindex $option 1]
+ set restore [lindex $option 2]
+
+ if { $elector1 == $elector2 } {
+ puts "\tRep$tnum.$let: Simulated crash and recovery\
+ (crashing client calls second election)."
+ } else {
+ puts "\tRep$tnum.$let: Simulated crash and recovery\
+ (non-crashing client calls second election)."
+ }
+
+ puts "\tRep$tnum.$let: Process messages from crasher\
+ $restore 2nd election."
+
+ puts "\t\tRep$tnum.$let.1: Note egens for all clients."
+ # Note egens for all the clients.
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ set clientenv($i) [lindex $pair 0]
+ set egen($i) [stat_field \
+ $clientenv($i) rep_stat "Election generation number"]
+ }
+
+ # Call an election which simulates a crash after sending
+ # its VOTE1.
+ set msg "\tRep$tnum.$let.2"
+ puts "\t$msg: Start election, simulate a crash."
+ set nsites $nclients
+ set nvotes $nclients
+ # Make the winner the crashing client, since the
+ # crashing client will have the biggest LSN.
+ set elector 1
+ set winner $elector
+ set crash($elector) 1
+ setpriority pri $nclients $winner
+ set err_cmd($elector) "electvote1"
+ run_election env_cmd envlist err_cmd pri crash $qdir \
+ $msg $elector $nsites $nvotes $nclients $winner 0 test.db
+ set msg "\tRep$tnum.$let.3"
+ puts "\t$msg: Close and reopen elector with recovery."
+ error_check_good \
+ clientenv_close($elector) [$clientenv($elector) close] 0
+
+ # Have other clients SKIP the election messages and process
+ # only C's startup messages. We'll do it by copying the files
+ # and emptying the originals.
+ set cwd [pwd]
+ foreach machid $machids {
+ file copy -force $qdir/repqueue$machid.db $qdir/save$machid.db
+ replclear $machid
+ }
+
+ # Reopen C and process messages. Only the startup messages
+ # will be available.
+ set clientenv($elector) [eval $env_cmd($elector) -recover]
+ set envlist [lreplace $envlist \
+ $elector $elector "$clientenv($elector) [expr $elector + 2]"]
+ process_msgs $envlist
+
+ # Verify egens (should be +1 in C, and unchanged
+ # in other clients).
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ set clientenv($i) [lindex $pair 0]
+ set newegen($i) [stat_field $clientenv($i) \
+ rep_stat "Election generation number"]
+ if { $i == $elector && $repfiles_in_memory == 0 } {
+ error_check_good \
+ egen+1 $newegen($i) [expr $egen($i) + 1]
+ } else {
+ error_check_good \
+ egen_unchanged $newegen($i) $egen($i)
+ }
+ }
+
+ # First chance to restore messages.
+ if { $restore == "before" } {
+ restore_messages $qdir
+ }
+
+ # Have C call an election (no crash simulation) and process
+ # all the messages.
+ set msg "\tRep$tnum.$let.4"
+ puts "\t$msg: Call second election."
+ set err_cmd($elector) "none"
+ set crash($elector) 0
+ run_election env_cmd envlist err_cmd pri crash $qdir \
+ $msg $elector2 $nsites $nvotes $nclients $winner 1 test.db
+
+ # Second chance to restore messages.
+ if { $restore == "after" } {
+ restore_messages $qdir
+ }
+ process_msgs $envlist
+
+ # Verify egens (should be +2 or more in all clients).
+ puts "\t\tRep$tnum.$let.5: Check egens."
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ set clientenv($i) [lindex $pair 0]
+ set newegen($i) [stat_field \
+ $clientenv($i) rep_stat "Election generation number"]
+
+ # If rep files are in-memory, egen value must come
+ # from other sites instead of the egen file, and
+ # will not increase as quickly.
+ if { $repfiles_in_memory } {
+ set mingen [expr $egen($i) + 1]
+ } else {
+ set mingen [expr $egen($i) + 2]
+ }
+ error_check_good egen+more($i) \
+ [expr $newegen($i) >= $mingen] 1
+ }
+ }
+
+ # Clean up.
+ foreach pair $envlist {
+ set cenv [lindex $pair 0]
+ error_check_good cenv_close [$cenv close] 0
+ }
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc restore_messages { qdir } {
+ global machids
+ set cwd [pwd]
+ foreach machid $machids {
+ file copy -force $qdir/save$machid.db $qdir/repqueue$machid.db
+ }
+}
+
diff --git a/db-4.8.30/test/rep027.tcl b/db-4.8.30/test/rep027.tcl
new file mode 100644
index 0000000..bfd316f
--- /dev/null
+++ b/db-4.8.30/test/rep027.tcl
@@ -0,0 +1,189 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep027
+# TEST Replication and secondary indexes.
+# TEST
+# TEST Set up a secondary index on the master and make sure
+# TEST it can be accessed from the client.
+
+proc rep027 { method { niter 1000 } { tnum "027" } args } {
+
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Renumbering recno is not permitted on a primary database.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_rrecno $method] != 1 } {
+ lappend test_methods $method
+ }
+ }
+ return $test_methods
+ }
+ if { [is_rrecno $method] == 1 } {
+ puts "Skipping rep027 for -rrecno."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r):\
+ Replication and secondary indices $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep027_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep027_sub { method niter tnum logset recargs largs } {
+ source ./include.tcl
+ global repfiles_in_memory
+ global verbose_check_secondaries
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set omethod [convert_method $method]
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create $verbargs $repmemargs \
+ -log_max 1000000 -home $masterdir -errpfx MASTER \
+ $m_txnargs $m_logargs -rep_master -rep_transport \
+ \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M) $recargs]
+
+ # Open a client
+ repladd 2
+ set env_cmd(C) "berkdb_env_noerr -create $verbargs $repmemargs \
+ $c_txnargs $c_logargs -home $clientdir -errpfx CLIENT \
+ -rep_client -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $env_cmd(C) $recargs]
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Set up database and secondary index on master.
+ puts "\tRep$tnum.a: Set up database with secondary index."
+ set pname "primary$tnum.db"
+ set sname "secondary$tnum.db"
+
+ # Open the primary.
+ set pdb [eval {berkdb_open_noerr -create \
+ -auto_commit -env} $masterenv $omethod $largs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+ process_msgs $envlist
+
+ # Open and associate a secondary.
+ set sdb [eval {berkdb_open_noerr -create \
+ -auto_commit -env} $masterenv -btree $sname]
+ error_check_good second_open [is_valid_db $sdb] TRUE
+ error_check_good db_associate [$pdb associate [callback_n 0] $sdb] 0
+
+ # Propagate to client.
+ process_msgs $envlist
+
+ # Put some data in the master.
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $niter } { incr n } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $method $datum]
+
+ set ret [$pdb put $key [chop_data $method $datum]]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ process_msgs $envlist
+
+ # Check secondaries on master.
+ set verbose_check_secondaries 1
+ puts "\tRep$tnum.b: Check secondaries on master."
+ check_secondaries $pdb $sdb $niter keys data "Rep$tnum.b"
+ error_check_good pdb_close [$pdb close] 0
+ error_check_good sdb_close [$sdb close] 0
+ process_msgs $envlist
+
+ # Get handles on primary and secondary db on client.
+ set clientpdb [eval {berkdb_open -auto_commit -env} $clientenv $pname]
+ error_check_good client_pri [is_valid_db $clientpdb] TRUE
+ set clientsdb [eval {berkdb_open -auto_commit -env} $clientenv $sname]
+ error_check_good client_sec [is_valid_db $clientsdb] TRUE
+ error_check_good client_associate \
+ [$clientpdb associate [callback_n 0] $clientsdb] 0
+
+ # Check secondaries on client.
+ puts "\tRep$tnum.c: Check secondaries on client."
+ check_secondaries $clientpdb $clientsdb $niter keys data "Rep$tnum.c"
+
+ # Clean up.
+ error_check_good clientpdb_close [$clientpdb close] 0
+ error_check_good clientsdb_close [$clientsdb close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+
+ error_check_good verify \
+ [verify_dir $clientdir "\tRep$tnum.e: " 0 0 1] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep028.tcl b/db-4.8.30/test/rep028.tcl
new file mode 100644
index 0000000..dc3a9ca
--- /dev/null
+++ b/db-4.8.30/test/rep028.tcl
@@ -0,0 +1,248 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep028
+# TEST Replication and non-rep env handles. (Also see rep006.)
+# TEST
+# TEST Open second non-rep env on client, and create a db
+# TEST through this handle. Open the db on master and put
+# TEST some data. Check whether the non-rep handle keeps
+# TEST working. Also check if opening the client database
+# TEST in the non-rep env writes log records.
+#
+proc rep028 { method { niter 100 } { tnum "028" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for btree only.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "\tRep$tnum: Skipping for method $method."
+ return
+ }
+
+ # Skip test for HP-UX because we can't open an env twice.
+ if { $is_hp_test == 1 } {
+ puts "\tRep$tnum: Skipping for HP-UX."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery, and with
+ # the non-rep handle either creating or just opening the database.
+ set clopts { "create" "open" }
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ foreach c $clopts {
+ puts "Rep$tnum ($method $r $c): Replication\
+ and non-rep env handles $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep028_sub $method $niter $tnum $l $r $c $args
+ }
+ }
+ }
+}
+
+proc rep028_sub { method niter tnum logset recargs clargs largs } {
+ source ./include.tcl
+ global is_hp_test
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set omethod [convert_method $method]
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ puts "\tRep$tnum.a: Open replicated envs and non-replicated client env."
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create \
+ -log_max 1000000 -home $masterdir $verbargs $repmemargs \
+ $m_txnargs $m_logargs -rep_master \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M) $recargs]
+
+ # Open a client
+ repladd 2
+ set env_cmd(C) "berkdb_env_noerr -create $c_txnargs \
+ $c_logargs -home $clientdir $verbargs $repmemargs \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $env_cmd(C) $recargs]
+
+ # Open 2nd non-replication handle on client env, and create
+ # a db. Note, by not specifying any subsystem args, we
+ # do a DB_JOINENV, which is what we want.
+ set nonrepenv [eval {berkdb_env_noerr -home $clientdir}]
+ error_check_good nonrepenv [is_valid_env $nonrepenv] TRUE
+
+ # Set up databases in-memory or on-disk.
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+
+ # If we're testing create, verify that if a non-rep client
+ # creates a database before the master does, then when that
+ # client goes to use it, it gets DB_DEAD_HANDLE.
+ #
+ if { $clargs == "create" } {
+ puts "\tRep$tnum.b: Create database non-replicated."
+ set let c
+ set nextlet d
+ set nonrepdb [eval berkdb_open_noerr -auto_commit \
+ -create $omethod -env $nonrepenv $dbname]
+ error_check_good nonrepdb_open [is_valid_db $nonrepdb] TRUE
+ tclsleep 2
+ } else {
+ set let b
+ set nextlet c
+ }
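+
+ # Note: $let and $nextlet keep the lettered progress messages
+ # consecutive (.b/.c or .c/.d) depending on whether the extra
+ # non-replicated create step above was run.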
+
+ #
+ # Now declare the clientenv a client.
+ #
+ puts "\tRep$tnum.$let: Declare env as rep client"
+ error_check_good client [$clientenv rep_start -client] 0
+ if { $clargs == "create" } {
+ #
+ # We'll only catch this error if we turn on no-autoinit.
+ # Otherwise, the system will throw away everything on the
+ # client and resync.
+ #
+ $clientenv rep_config {noautoinit on}
+ }
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist 0 NONE err
+ #
+ # In the create case, we'll detect the non-rep log records and
+ # determine this client was never part of the replication group.
+ #
+ if { $clargs == "create" } {
+ error_check_good errchk [is_substr $err \
+ "DB_REP_JOIN_FAILURE"] 1
+ error_check_good close [$nonrepdb close] 0
+ } else {
+ # Open the same db through the master handle. Put data
+ # and process messages.
+ set db [eval berkdb_open_noerr \
+ -create $omethod -env $masterenv -auto_commit $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+ eval rep_test $method $masterenv $db $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ #
+ # In the open case, we just read the existing database
+ # through a non-rep read-only handle. Doing so should
+ # not create log records on the client (though it has
+ # in the past).
+ #
+ puts "\tRep$tnum.$nextlet: Open and read database"
+ set nonrepdb [eval berkdb_open \
+ -rdonly -env $nonrepenv $dbname]
+ error_check_good nonrepdb_open [is_valid_db $nonrepdb] TRUE
+ #
+ # If opening wrote log records, we need to process
+ # some more on the client to notice the end of log
+ # is now in an unexpected place.
+ #
+ eval rep_test $method $masterenv $db $niter 0 0 0 $largs
+ process_msgs $envlist
+ error_check_good close [$nonrepdb close] 0
+
+ # By passing in "NULL" for the database name, we compare
+ # only the master and client logs, not the databases.
+ rep_verify $masterdir $masterenv $clientdir $clientenv 0 0 1 NULL
+
+# set stat [catch {eval exec $util_path/db_printlog \
+# -h $masterdir > $masterdir/prlog} result]
+# error_check_good stat_mprlog $stat 0
+# set stat [catch {eval exec $util_path/db_printlog \
+# -h $clientdir > $clientdir/prlog} result]
+# error_check_good stat_cprlog $stat 0
+# error_check_good log_cmp \
+# [filecmp $masterdir/prlog $clientdir/prlog] 0
+
+ # Clean up.
+ error_check_good db_close [$db close] 0
+
+ # Check that databases are in-memory or on-disk as expected.
+ check_db_location $nonrepenv
+ check_db_location $masterenv
+ check_db_location $clientenv
+ }
+
+ error_check_good nonrepenv_close [$nonrepenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep029.tcl b/db-4.8.30/test/rep029.tcl
new file mode 100644
index 0000000..48a844d
--- /dev/null
+++ b/db-4.8.30/test/rep029.tcl
@@ -0,0 +1,292 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep029
+# TEST Test of internal initialization.
+# TEST
+# TEST One master, one client.
+# TEST Generate several log files.
+# TEST Remove old master log files.
+# TEST Delete client files and restart client.
+# TEST Put one more record to the master.
+#
+proc rep029 { method { niter 200 } { tnum "029" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+ global passwd
+ global has_crypto
+
+ set args [convert_args $method $args]
+ set saved_args $args
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ set logsets [create_logsets 2]
+
+ # Run the body of the test with and without recovery, with and
+ # without cleaning, and with bulk transfer. Skip recovery with
+ # in-memory logging - it doesn't make sense.
+ set opts { bulk clean noclean }
+ foreach r $test_recopts {
+ foreach c $opts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ set envargs ""
+ set args $saved_args
+ puts "Rep$tnum ($method $envargs $r $c $args):\
+ Test of internal initialization $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep029_sub $method $niter $tnum $envargs \
+ $l $r $c $args
+
+ # Skip encrypted tests if not supported.
+ if { $has_crypto == 0 || $databases_in_memory } {
+ continue
+ }
+
+ # Run same set of tests with security.
+ #
+ append envargs " -encryptaes $passwd "
+ append args " -encrypt "
+ puts "Rep$tnum ($method $envargs $r $c $args):\
+ Test of internal initialization $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep029_sub $method $niter $tnum $envargs \
+ $l $r $c $args
+ }
+ }
+ }
+}
+
+proc rep029_sub { method niter tnum envargs logset recargs opts largs } {
+ global testdir
+ global passwd
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
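+ # With a 4K pagesize this sets log_max to 32KB, so rep_test
+ # quickly fills more than one log file.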
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $repmemargs \
+ $m_logargs -log_max $log_max $envargs $verbargs \
+ -errpfx MASTER -home $masterdir \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $repmemargs \
+ $c_logargs -log_max $log_max $envargs $verbargs \
+ -errpfx CLIENT -home $clientdir \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $masterenv test force noarchive_timeout
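+ # Without this, the 30-second window during which log_archive
+ # declines to remove files would make the archive calls below
+ # ineffective.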
+
+ # Run rep_test in the master (and update client).
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist 0 NONE err
+ error_check_good process_msgs $err 0
+
+ if { [lsearch $envargs "-encrypta*"] !=-1 } {
+ set enc "-P $passwd"
+ } else {
+ set enc ""
+ }
+
+ # Find out what exists on the client. We need to loop until
+ # the first master log file > last client log file.
+ puts "\tRep$tnum.b: Close client."
+ if { $c_logtype != "in-memory" } {
+ set res [eval exec $util_path/db_archive $enc -l -h $clientdir]
+ }
+ set last_client_log [get_logfile $clientenv last]
+ error_check_good client_close [$clientenv close] 0
+
+ set stop 0
+ while { $stop == 0 } {
+ # Run rep_test in the master (don't update client). Each time
+ # through the loop, start at the same $start value (i.e., don't
+ # increment $start). Since all we're trying to do is to fill up
+ # the log, it doesn't matter whether we insert new records or
+ # rewrite existing ones. But if we insert new records we might
+ # fill up the cache, which is fatal in the in-memory database
+ # case.
+ #
+ puts "\tRep$tnum.c: Running rep_test in replicated env."
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ replclear 2
+
+ puts "\tRep$tnum.d: Run db_archive on master."
+ if { $m_logtype != "in-memory"} {
+ set res [eval exec $util_path/db_archive $enc -d -h $masterdir]
+ }
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > $last_client_log } {
+ set stop 1
+ }
+ }
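+
+ # At this point the master's first remaining log file is newer than
+ # anything the client ever had, so the reopened client cannot catch
+ # up from the log alone and is forced into internal initialization.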
+
+ puts "\tRep$tnum.e: Reopen client ($opts)."
+ if { $opts == "clean" } {
+ env_cleanup $clientdir
+ }
+ if { $opts == "bulk" } {
+ error_check_good bulk [$masterenv rep_config {bulk on}] 0
+ }
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist 0 NONE err
+ error_check_good process_msgs $err 0
+ if { $opts != "clean" } {
+ puts "\tRep$tnum.e.1: Trigger log request"
+ #
+ # When we don't clean, starting the client doesn't
+ # trigger any events. We need to generate some log
+ # records so that the client requests the missing
+ # logs and that will trigger it.
+ #
+ set entries 10
+ eval rep_test\
+ $method $masterenv NULL $entries $start $start 0 $largs
+ incr start $entries
+ process_msgs $envlist 0 NONE err
+ error_check_good process_msgs $err 0
+ }
+
+ puts "\tRep$tnum.f: Verify databases"
+ #
+ # If doing bulk testing, turn it off now so that it forces us
+ # to flush anything currently in the bulk buffer. We need to
+ # do this because rep_test might have aborted a transaction on
+ # its last iteration and those log records would still be in
+ # the bulk buffer causing the log comparison to fail.
+ #
+ if { $opts == "bulk" } {
+ puts "\tRep$tnum.f.1: Turn off bulk transfers."
+ error_check_good bulk [$masterenv rep_config {bulk off}] 0
+ process_msgs $envlist 0 NONE err
+ error_check_good process_msgs $err 0
+ }
+
+ #
+ # !!! This test CANNOT use rep_verify for logs due to encryption.
+ # Just compare databases. We either have to copy in
+ # all the code in rep_verify to adjust the beginning LSN
+ # or skip the log check for just this one test.
+
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 0
+
+ # Add records to the master and update client.
+ puts "\tRep$tnum.g: Add more records and check again."
+ set entries 10
+ eval rep_test $method $masterenv NULL $entries $start $start 0 $largs
+ incr start $entries
+ process_msgs $envlist 0 NONE err
+ error_check_good process_msgs $err 0
+
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 0
+
+ set bulkxfer [stat_field $masterenv rep_stat "Bulk buffer transfers"]
+ if { $opts == "bulk" } {
+ error_check_bad bulkxferon $bulkxfer 0
+ } else {
+ error_check_good bulkxferoff $bulkxfer 0
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
diff --git a/db-4.8.30/test/rep030.tcl b/db-4.8.30/test/rep030.tcl
new file mode 100644
index 0000000..0016cf2
--- /dev/null
+++ b/db-4.8.30/test/rep030.tcl
@@ -0,0 +1,388 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep030
+# TEST Test of internal initialization multiple files and pagesizes.
+# TEST Hold some databases open on master.
+# TEST
+# TEST One master, one client using a data_dir for internal init.
+# TEST Generate several log files.
+# TEST Remove old master log files.
+# TEST Delete client files and restart client.
+# TEST Put one more record to the master.
+#
+proc rep030 { method { niter 500 } { tnum "030" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery, with and
+ # without cleaning, and with bulk transfer.
+ set opts { noclean clean bulk }
+ foreach r $test_recopts {
+ foreach c $opts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r $c):\
+ Internal initialization - hold some\
+ databases open on master $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep030_sub $method $niter $tnum $l $r $c $args
+ }
+ }
+ }
+}
+
+proc rep030_sub { method niter tnum logset recargs opts largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set maxpg 16384
+ set log_max [expr $maxpg * 8]
+ set cache [expr $maxpg * 32 ]
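+ # A 16K maxpg yields a 128KB log_max and a 512KB cache, enough to
+ # hold the largest page size used below while still rolling over
+ # log files quickly.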
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Run internal init using a data directory
+ #
+ file mkdir $masterdir/data
+ file mkdir $masterdir/data2
+ file mkdir $clientdir/data
+ file mkdir $clientdir/data2
+ #
+ # Set "data" twice to test duplicate data_dirs as well
+ # as multiple, different data dirs.
+ #
+ set data_diropts " -data_dir data -data_dir data -data_dir data2"
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ $repmemargs \
+ $m_logargs -log_max $log_max -errpfx MASTER \
+ -cachesize { 0 $cache 1 } $data_diropts $verbargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $repmemargs \
+ $c_logargs -log_max $log_max -errpfx CLIENT \
+ -cachesize { 0 $cache 1 } $data_diropts $verbargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ # Run rep_test in the master (and update client).
+ set startpgsz 512
+ set pglist ""
+ for { set pgsz $startpgsz } { $pgsz <= $maxpg } \
+ { set pgsz [expr $pgsz * 2] } {
+ lappend pglist $pgsz
+ }
+ set nfiles [llength $pglist]
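+ # pglist is {512 1024 2048 4096 8192 16384}, so nfiles is 6; the
+ # loop below runs rep_test once per page size and renames the
+ # resulting test.db each time.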
+ puts "\tRep$tnum.a.0: Running rep_test $nfiles times in replicated env."
+ set dbopen ""
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set mult [expr $i * 10]
+ set nentries [expr $niter + $mult]
+ set pagesize [lindex $pglist $i]
+ set largs " -pagesize $pagesize "
+ eval rep_test $method $masterenv NULL $nentries $mult $mult \
+ 0 $largs
+ process_msgs $envlist
+
+ #
+ # Every time we run 'rep_test' we create 'test.db', so
+ # rename it each time through the loop.
+ #
+ set old "test.db"
+ set new "test.$i.db"
+ if { $databases_in_memory == 0 } {
+ error_check_good rename [$masterenv dbrename \
+ -auto_commit $old $new] 0
+ } else {
+ error_check_good inmem_rename [$masterenv dbrename \
+ "" $old $new] 0
+ }
+ process_msgs $envlist
+ #
+ # We want to keep some databases open so that we test the
+ # code finding the files in the data dir as well as finding
+ # them in the dbreg list.
+ #
+ if { [expr $i % 2 ] == 0 } {
+ if { $databases_in_memory == 1 } {
+ set db [berkdb_open_noerr\
+ -env $masterenv "" $new]
+ } else {
+ set db [berkdb_open_noerr\
+ -env $masterenv $new]
+ }
+ error_check_good dbopen.$i [is_valid_db $db] TRUE
+ lappend dbopen $db
+ }
+ }
+
+ # Set up a few special databases too. We want one with a subdatabase
+ # and we want an empty database, in addition to in-memory dbs.
+
+ # Set up databases in-memory or on-disk as expected.
+ if { $databases_in_memory } {
+ set testfile { "" "test.db" }
+ set emptyfile { "" "empty.db" }
+ } else {
+ set testfile "test.db"
+ set emptyfile "empty.db"
+ }
+
+ if { [is_queue $method] } {
+ set sub ""
+ } else {
+ set sub "subdb"
+ }
+
+ set omethod [convert_method $method]
+ set largs " -pagesize $maxpg "
+ set largs [convert_args $method $largs]
+
+ #
+ # Create/close an empty database.
+ #
+ set db [eval {berkdb_open_noerr -env $masterenv -auto_commit -create \
+ -mode 0644} $largs $omethod $emptyfile]
+ error_check_good emptydb [is_valid_db $db] TRUE
+ error_check_good empty_close [$db close] 0
+ #
+ # If we're not using in-mem named databases, open a subdb and
+ # keep it open. (Do a regular db if method is queue.)
+ # We need it a few times later on.
+ #
+ if { $databases_in_memory } {
+ set db [eval {berkdb_open_noerr -env $masterenv -auto_commit \
+ -create -mode 0644} $largs $omethod $testfile]
+ } else {
+ set db [eval {berkdb_open_noerr -env $masterenv -auto_commit \
+ -create -mode 0644} $largs $omethod $testfile $sub]
+ }
+ error_check_good subdb [is_valid_db $db] TRUE
+ set start 0
+ eval rep_test $method $masterenv $db $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Close client."
+ # First save the log number of the latest client log.
+ set last_client_log [get_logfile $clientenv last]
+ error_check_good client_close [$clientenv close] 0
+
+ # Run rep_test in the master (don't update client).
+ set stop 0
+ while { $stop == 0 } {
+ # Run rep_test in the master (don't update client).
+ puts "\tRep$tnum.c: Running rep_test in replicated env."
+ eval rep_test \
+ $method $masterenv $db $niter $start $start 0 $largs
+ incr start $niter
+ replclear 2
+
+ puts "\tRep$tnum.d: Run db_archive on master."
+ if { $m_logtype != "in-memory"} {
+ set res [eval exec $util_path/db_archive -d -h $masterdir]
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ }
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > $last_client_log } {
+ set stop 1
+ }
+ }
+
+ puts "\tRep$tnum.e: Reopen client ($opts)."
+ if { $opts == "clean" } {
+ env_cleanup $clientdir
+ file mkdir $clientdir/data
+ file mkdir $clientdir/data2
+ }
+ if { $opts == "bulk" } {
+ error_check_good bulk [$masterenv rep_config {bulk on}] 0
+ }
+
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist 0 NONE err
+ if { $opts != "clean" } {
+ puts "\tRep$tnum.e.1: Trigger log request"
+ #
+ # When we don't clean, starting the client doesn't
+ # trigger any events. We need to generate some log
+ # records so that the client requests the missing
+ # logs and that will trigger it.
+ #
+ set entries 100
+ eval rep_test $method $masterenv $db $entries $start $start 0 $largs
+ incr start $entries
+ process_msgs $envlist 0 NONE err
+ }
+ error_check_good subdb_close [$db close] 0
+ process_msgs $envlist 0 NONE err
+
+ puts "\tRep$tnum.f: Verify logs and databases"
+ #
+ # If doing bulk testing, turn it off now so that it forces us
+ # to flush anything currently in the bulk buffer. We need to
+ # do this because rep_test might have aborted a transaction on
+ # its last iteration and those log records would still be in
+ # the bulk buffer causing the log comparison to fail.
+ #
+ if { $opts == "bulk" } {
+ puts "\tRep$tnum.f.1: Turn off bulk transfers."
+ error_check_good bulk [$masterenv rep_config {bulk off}] 0
+ process_msgs $envlist 0 NONE err
+ }
+
+ rep_verify $masterdir $masterenv $clientdir $clientenv\
+ 1 1 1 test.db $masterdir/data
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set dbname "test.$i.db"
+ rep_verify $masterdir $masterenv $clientdir $clientenv \
+ 1 1 0 $dbname $masterdir/data
+ }
+
+ # Close the database held open on master for initialization.
+ foreach db $dbopen {
+ error_check_good db_close [$db close] 0
+ }
+
+ # Add records to the master and update client.
+ puts "\tRep$tnum.g: Add more records and check again."
+ set entries 10
+ if { $databases_in_memory } {
+ set db [eval {berkdb_open_noerr -env $masterenv -auto_commit \
+ -mode 0644} $largs $omethod $testfile]
+ } else {
+ set db [eval {berkdb_open_noerr -env $masterenv -auto_commit \
+ -mode 0644} $largs $omethod $testfile $sub]
+
+ }
+ error_check_good subdb [is_valid_db $db] TRUE
+ eval rep_test $method $masterenv $db $entries $niter 0 0 $largs
+ error_check_good subdb_close [$db close] 0
+ process_msgs $envlist 0 NONE err
+
+ rep_verify $masterdir $masterenv $clientdir $clientenv \
+ 1 1 0 test.db $masterdir/data
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set dbname "test.$i.db"
+ rep_verify $masterdir $masterenv $clientdir $clientenv \
+ 1 1 0 $dbname $masterdir/data
+ }
+ set bulkxfer [stat_field $masterenv rep_stat "Bulk buffer transfers"]
+ if { $opts == "bulk" } {
+ error_check_bad bulkxferon $bulkxfer 0
+ } else {
+ error_check_good bulkxferoff $bulkxfer 0
+ }
+
+ # Check that databases and logs are in-memory or on-disk as expected.
+ check_db_location $masterenv $dbname $masterdir/data
+ check_db_location $clientenv $dbname $clientdir/data
+
+ check_log_location $masterenv
+ check_log_location $clientenv
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep031.tcl b/db-4.8.30/test/rep031.tcl
new file mode 100644
index 0000000..60b5c00
--- /dev/null
+++ b/db-4.8.30/test/rep031.tcl
@@ -0,0 +1,345 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep031
+# TEST Test of internal initialization and blocked operations.
+# TEST
+# TEST One master, one client.
+# TEST Put one more record to the master.
+# TEST Test that internal initialization blocks:
+# TEST log_archive, rename, remove, fileid_reset, lsn_reset.
+# TEST Sleep 30+ seconds.
+# TEST Test that blocked operations are now unblocked.
+#
+proc rep031 { method { niter 200 } { tnum "031" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # There is nothing method-sensitive in this test, so
+ # skip for all except btree.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] != 1 } {
+ puts "Skipping rep031 for method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery,
+ # and with and without cleaning. Skip recovery with in-memory
+ # logging - it doesn't make sense.
+ set cleanopts { clean noclean }
+ foreach r $test_recopts {
+ foreach c $cleanopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r $c $args):\
+ Test of internal init and blocked\
+ operations $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep031_sub $method $niter $tnum $l $r $c $args
+ }
+ }
+ }
+}
+
+proc rep031_sub { method niter tnum logset recargs clean largs } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ $m_logargs -log_max $log_max $verbargs $repmemargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $c_logargs -log_max $log_max $verbargs $repmemargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ $masterenv test force noarchive_timeout
+
+ # Run rep_test in the master (and update client).
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Close client."
+ # Find out what exists on the client. We need to loop until
+ # the first master log file > last client log file.
+ if { $c_logtype != "in-memory" } {
+ set res [eval exec $util_path/db_archive -l -h $clientdir]
+ }
+ set last_client_log [get_logfile $clientenv last]
+ error_check_good client_close [$clientenv close] 0
+
+ set stop 0
+ while { $stop == 0 } {
+ # Run rep_test in the master (don't update client).
+ puts "\tRep$tnum.c: Running rep_test in replicated env."
+ eval rep_test \
+ $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ replclear 2
+
+ puts "\tRep$tnum.d: Run db_archive on master."
+ if { $m_logtype != "in-memory"} {
+ set res \
+ [eval exec $util_path/db_archive -d -h $masterdir]
+ }
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > $last_client_log } {
+ set stop 1
+ }
+ }
+
+ puts "\tRep$tnum.e: Reopen client ($clean)."
+ if { $clean == "clean" } {
+ env_cleanup $clientdir
+ }
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist 0 NONE err
+ if { $clean == "noclean" } {
+ puts "\tRep$tnum.e.1: Trigger log request"
+ #
+ # When we don't clean, starting the client doesn't
+ # trigger any events. We need to generate some log
+ # records so that the client requests the missing
+ # logs and that will trigger it.
+ #
+ set entries 10
+ eval rep_test \
+ $method $masterenv NULL $entries $start $start 0 $largs
+ incr start $entries
+ process_msgs $envlist 0 NONE err
+ }
+
+ #
+ # We have now forced an internal initialization. Verify it is correct.
+ #
+ puts "\tRep$tnum.f: Verify logs and databases"
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ #
+ # Internal initializations disable certain operations on the master
+ # for 30 seconds after the last init-related message is received
+ # by the master. Those operations are dbremove, dbrename,
+ # fileid_reset, lsn_reset, and log_archive (with removal).
+ #
+ puts "\tRep$tnum.g: Try to remove and rename the database."
+ set dbname "test.db"
+ set old $dbname
+ set new $dbname.new
+ if { $databases_in_memory } {
+ set stat [catch {$masterenv dbrename -auto_commit "" $old $new} ret]
+ } else {
+ set stat [catch {$masterenv dbrename -auto_commit $old $new} ret]
+ }
+ error_check_good rename_fail $stat 1
+ error_check_good rename_err [is_substr $ret "invalid"] 1
+ if { $databases_in_memory } {
+ set stat [catch {$masterenv dbremove -auto_commit "" $old} ret]
+ } else {
+ set stat [catch {$masterenv dbremove -auto_commit $old} ret]
+ }
+ error_check_good remove_fail $stat 1
+ error_check_good remove_err [is_substr $ret "invalid"] 1
+
+ # The fileid_reset and lsn_reset operations work on physical files
+ # so we do not need to test them for in-memory databases.
+ if { $databases_in_memory != 1 } {
+ puts "\tRep$tnum.h: Try to reset LSNs and fileid on the database."
+ set stat [catch {$masterenv id_reset $old} ret]
+ error_check_good id_reset $stat 1
+ error_check_good id_err [is_substr $ret "invalid"] 1
+ set stat [catch {$masterenv lsn_reset $old} ret]
+ error_check_good lsn_reset $stat 1
+ error_check_good lsn_err [is_substr $ret "invalid"] 1
+ }
+
+ #
+ # Need entries big enough to generate additional log files.
+ # However, db_archive will not return an error, it will
+ # just retain the log file.
+ #
+ puts "\tRep$tnum.i: Run rep_test to generate more logs."
+ set entries 200
+ eval rep_test $method $masterenv NULL $entries $start $start 0 $largs
+ incr start $entries
+ process_msgs $envlist 0 NONE err
+
+ # Test lockout of archiving only in on-disk case.
+ if { $m_logtype != "in-memory" } {
+ puts "\tRep$tnum.j: Try to db_archive."
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ set first [lindex $res 0]
+ set res [eval exec $util_path/db_archive -d -h $masterdir]
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ error_check_bad log.gone [lsearch -exact $res $first] -1
+
+ puts "\tRep$tnum.j.0: Try to log_archive in master env."
+ set res [$masterenv log_archive -arch_remove]
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ error_check_bad log.gone0 [lsearch -exact $res $first] -1
+
+ # We can't open a second handle on the env in HP-UX.
+ if { $is_hp_test != 1 } {
+ puts "\tRep$tnum.j.1: Log_archive in new non-rep env."
+ set newenv [berkdb_env_noerr -txn nosync \
+ -log_max $log_max -home $masterdir]
+ error_check_good newenv [is_valid_env $newenv] TRUE
+ set res [$newenv log_archive -arch_remove]
+ set res [eval exec \
+ $util_path/db_archive -l -h $masterdir]
+ error_check_bad \
+ log.gone1 [lsearch -exact $res $first] -1
+ }
+ }
+
+ # Check that databases are in-memory or on-disk as expected, before
+ # we try to delete the databases!
+ check_db_location $masterenv
+ check_db_location $clientenv
+
+ set timeout 30
+ #
+ # Sleep timeout+2 seconds. The lockout window is 30 seconds, so
+ # sleep a bit longer to make sure we cross it.
+ #
+ set to [expr $timeout + 2]
+ puts "\tRep$tnum.k: Wait $to seconds to timeout"
+ tclsleep $to
+ puts "\tRep$tnum.l: Retry blocked operations after wait"
+ if { $databases_in_memory == 1 } {
+ set stat [catch {$masterenv dbrename -auto_commit "" $old $new} ret]
+ error_check_good rename_work $stat 0
+ set stat [catch {$masterenv dbremove -auto_commit "" $new} ret]
+ error_check_good remove_work $stat 0
+ } else {
+ set stat [catch {$masterenv id_reset $old} ret]
+ error_check_good id_reset_work $stat 0
+ set stat [catch {$masterenv lsn_reset $old} ret]
+ error_check_good lsn_reset_work $stat 0
+ set stat [catch {$masterenv dbrename -auto_commit $old $new} ret]
+ error_check_good rename_work $stat 0
+ set stat [catch {$masterenv dbremove -auto_commit $new} ret]
+ error_check_good remove_work $stat 0
+ }
+ process_msgs $envlist 0 NONE err
+
+ if { $m_logtype != "in-memory" } {
+ # Remove files via the 2nd non-rep env, check via db_archive.
+ if { $is_hp_test != 1 } {
+ set res [$newenv log_archive -arch_remove]
+ set res \
+ [eval exec $util_path/db_archive -l -h $masterdir]
+ error_check_good \
+ log.gone [lsearch -exact $res $first] -1
+ error_check_good newenv_close [$newenv close] 0
+ } else {
+ set res [$masterenv log_archive -arch_remove]
+ set res \
+ [eval exec $util_path/db_archive -l -h $masterdir]
+ error_check_good \
+ log.gone [lsearch -exact $res $first] -1
+ }
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep032.tcl b/db-4.8.30/test/rep032.tcl
new file mode 100644
index 0000000..735ff23
--- /dev/null
+++ b/db-4.8.30/test/rep032.tcl
@@ -0,0 +1,200 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep032
+# TEST Test of log gap processing.
+# TEST
+# TEST One master, one client.
+# TEST Run rep_test.
+# TEST Run rep_test without sending messages to client.
+# TEST Make sure client missing the messages catches up properly.
+#
+proc rep032 { method { niter 200 } { tnum "032" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery,
+ # and with and without bulk transfer.
+ set opts { "" "bulk" }
+ foreach r $test_recopts {
+ foreach b $opts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r $b $args):\
+ Test of log gap processing $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep032_sub $method $niter $tnum $l $r $b $args
+ }
+ }
+ }
+}
+
+proc rep032_sub { method niter tnum logset recargs opts largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $repmemargs \
+ $m_logargs $verbargs -home $masterdir -errpfx MASTER \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+ if { $opts == "bulk" } {
+ error_check_good bulk [$masterenv rep_config {bulk on}] 0
+ }
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $repmemargs \
+ $c_logargs $verbargs -home $clientdir -errpfx CLIENT \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Run rep_test in the master (and update client).
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Check client processed everything properly."
+ set queued [stat_field $clientenv rep_stat "Maximum log records queued"]
+ set request1 [stat_field $clientenv rep_stat "Log records requested"]
+ error_check_good queued $queued 0
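+ # Remember the baseline count of log records requested; after the
+ # deliberately dropped messages below it must increase (checked in
+ # step .e).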
+
+ # Run rep_test in the master (don't update client).
+ # First run with dropping all client messages via replclear.
+ puts "\tRep$tnum.c: Running rep_test dropping client msgs."
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ replclear 2
+ process_msgs $envlist
+
+ #
+ # Need new operations to force log gap processing to
+ # request missing pieces.
+ #
+ puts "\tRep$tnum.d: Running rep_test again replicated."
+ #
+ # Force a checkpoint to cause a gap to force rerequest.
+ #
+ $masterenv txn_checkpoint -force
+ process_msgs $envlist
+ tclsleep 1
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+
+ puts "\tRep$tnum.e: Check we re-requested and had a backlog."
+ set queued [stat_field $clientenv rep_stat "Maximum log records queued"]
+ set request2 [stat_field $clientenv rep_stat "Log records requested"]
+ error_check_bad queued $queued 0
+ error_check_bad request $request1 $request2
+
+ puts "\tRep$tnum.f: Verify logs and databases"
+ #
+ # If doing bulk testing, turn it off now so that it forces us
+ # to flush anything currently in the bulk buffer. We need to
+ # do this because rep_test might have aborted a transaction on
+ # its last iteration and those log records would still be in
+ # the bulk buffer causing the log comparison to fail.
+ #
+ if { $opts == "bulk" } {
+ puts "\tRep$tnum.f.1: Turn off bulk transfers."
+ error_check_good bulk [$masterenv rep_config {bulk off}] 0
+ process_msgs $envlist 0 NONE err
+ }
+
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ set bulkxfer [stat_field $masterenv rep_stat "Bulk buffer transfers"]
+ if { $opts == "bulk" } {
+ error_check_bad bulkxferon $bulkxfer 0
+ } else {
+ error_check_good bulkxferoff $bulkxfer 0
+ }
+
+ check_log_location $masterenv
+ check_log_location $clientenv
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep033.tcl b/db-4.8.30/test/rep033.tcl
new file mode 100644
index 0000000..a6dd5f8
--- /dev/null
+++ b/db-4.8.30/test/rep033.tcl
@@ -0,0 +1,273 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep033
+# TEST Test of internal initialization with rename and remove of dbs.
+# TEST
+# TEST One master, one client.
+# TEST Generate several databases. Replicate to client.
+# TEST Do some renames and removes, both before and after
+# TEST closing the client.
+#
+proc rep033 { method { niter 200 } { tnum "033" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ # This test depends on manipulating logs, so it cannot be run
+ # with in-memory logging.
+ global mixed_mode_logging
+ if { $mixed_mode_logging > 0 } {
+ puts "Rep$tnum: Skipping for mixed-mode logging."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery,
+ # and with and without cleaning.
+ set envargs ""
+ set cleanopts { noclean clean }
+ set when { before after }
+ foreach r $test_recopts {
+ foreach c $cleanopts {
+ foreach w $when {
+ puts "Rep$tnum ($method $envargs $c $r $w $args):\
+ Test of internal initialization $msg $msg2."
+ rep033_sub $omethod $niter $tnum $envargs \
+ $r $c $w $args
+ }
+ }
+ }
+}
+
+proc rep033_sub { method niter tnum envargs recargs clean when largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_buf [expr $pagesize * 2]
+ set log_max [expr $log_buf * 4]
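+ # With a 4K pagesize this gives an 8KB log buffer and a 32KB
+ # log_max, satisfying the documented 4x relationship while keeping
+ # log files small.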
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create -txn nosync \
+ -log_buffer $log_buf -log_max $log_max $envargs \
+ -errpfx MASTER $verbargs $repmemargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create -txn nosync \
+ -log_buffer $log_buf -log_max $log_max $envargs \
+ -errpfx CLIENT $verbargs $repmemargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ # Set up for in-memory or on-disk databases.
+ if { $databases_in_memory } {
+ set memargs { "" }
+ } else {
+ set memargs ""
+ }
+
+ puts "\tRep$tnum.a: Create several databases on master."
+ set oflags " -env $masterenv $method -create -auto_commit "
+ set dbw [eval {berkdb_open_noerr} $oflags $largs $memargs w.db]
+ set dbx [eval {berkdb_open_noerr} $oflags $largs $memargs x.db]
+ set dby [eval {berkdb_open_noerr} $oflags $largs $memargs y.db]
+ set dbz [eval {berkdb_open_noerr} $oflags $largs $memargs z.db]
+ error_check_good dbw_close [$dbw close] 0
+ error_check_good dbx_close [$dbx close] 0
+ error_check_good dby_close [$dby close] 0
+ error_check_good dbz_close [$dbz close] 0
+
+ # Update client, then close.
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Close client."
+ error_check_good client_close [$clientenv close] 0
+
+ # If we're doing the rename/remove operations before adding
+ # databases A and B, manipulate only the existing files.
+ if { $when == "before" } {
+ rep033_rename_remove $masterenv
+ }
+
+ # Run rep_test in the master (don't update client).
+ #
+ # We'd like to control the names of these dbs, so give
+ # rep_test an existing handle.
+ #
+ puts "\tRep$tnum.c: Create new databases. Populate with rep_test."
+ set dba [eval {berkdb_open_noerr} $oflags $largs $memargs a.db]
+ set dbb [eval {berkdb_open_noerr} $oflags $largs $memargs b.db]
+ eval rep_test $method $masterenv $dba $niter 0 0 0 $largs
+ eval rep_test $method $masterenv $dbb $niter 0 0 0 $largs
+ error_check_good dba_close [$dba close] 0
+ error_check_good dbb_close [$dbb close] 0
+
+ # Throw away messages for client.
+ replclear 2
+
+ # If we're doing the rename/remove afterwards, manipulate
+ # all the files including A and B.
+ if { $when == "after" } {
+ rep033_rename_remove $masterenv
+ }
+ error_check_good rename_b [eval {$masterenv dbrename} $memargs b.db x.db] 0
+ error_check_good remove_a [eval {$masterenv dbremove} $memargs a.db] 0
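+ # Net effect on the master's namespace: w.db (formerly x.db),
+ # c.db (formerly y.db) and x.db (formerly b.db) remain; a.db,
+ # z.db and the original w.db are gone.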
+
+ puts "\tRep$tnum.d: Run db_archive on master."
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ error_check_bad log.1.present [lsearch -exact $res log.0000000001] -1
+ set res [eval exec $util_path/db_archive -d -h $masterdir]
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ error_check_good log.1.gone [lsearch -exact $res log.0000000001] -1
+
+ puts "\tRep$tnum.e: Reopen client ($clean)."
+ if { $clean == "clean" } {
+ env_cleanup $clientdir
+ }
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist 0 NONE err
+ if { $clean == "noclean" } {
+ puts "\tRep$tnum.e.1: Trigger log request"
+ #
+ # When we don't clean, starting the client doesn't
+ # trigger any events. We need to generate some log
+ # records so that the client requests the missing
+ # logs and that will trigger it.
+ #
+ set entries 10
+ eval rep_test $method $masterenv NULL $entries $niter 0 0 $largs
+ process_msgs $envlist 0 NONE err
+ }
+
+ puts "\tRep$tnum.f: Verify logs and databases"
+ #
+ # By sending in a NULL for dbname, we only compare logs.
+ #
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1 NULL
+ #
+ # ... now the databases, manually. X, W, and C should exist.
+ #
+ set dbnames "x.db w.db c.db"
+ foreach db $dbnames {
+ set db1 [eval \
+ {berkdb_open_noerr -env $masterenv} $largs -rdonly $memargs $db]
+ set db2 [eval \
+ {berkdb_open_noerr -env $clientenv} $largs -rdonly $memargs $db]
+
+ error_check_good compare:$db [db_compare \
+ $db1 $db2 $masterdir/$db $clientdir/$db] 0
+ error_check_good db1_close [$db1 close] 0
+ error_check_good db2_close [$db2 close] 0
+ }
+
+ # A and B should be gone on the client.
+ error_check_good dba_gone [file exists $clientdir/a.db] 0
+ error_check_good dbb_gone [file exists $clientdir/b.db] 0
+ #
+ # Currently we cannot remove z.db on the client because
+ # we don't own the file namespace. So, we cannot do
+ # the check below. If that changes, we want the test below.
+ # error_check_good dbz_gone [file exists $clientdir/z.db] 0
+
+ # Clean up.
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep033_rename_remove { env } {
+ global databases_in_memory
+ if { $databases_in_memory } {
+ set memargs { "" }
+ } else {
+ set memargs ""
+ }
+
+ # Here we manipulate databases W, X, Y, and Z.
+ # Remove W.
+ error_check_good remove_w [eval $env dbremove $memargs w.db] 0
+
+ # Rename X to W, Y to C (an entirely new name).
+ error_check_good rename_x [eval $env dbrename $memargs x.db w.db] 0
+ error_check_good rename_y [eval $env dbrename $memargs y.db c.db] 0
+
+ # Remove Z.
+ error_check_good remove_z [eval $env dbremove $memargs z.db] 0
+}
diff --git a/db-4.8.30/test/rep034.tcl b/db-4.8.30/test/rep034.tcl
new file mode 100644
index 0000000..1ede13d
--- /dev/null
+++ b/db-4.8.30/test/rep034.tcl
@@ -0,0 +1,393 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep034
+# TEST Test of STARTUPDONE notification.
+# TEST
+# TEST STARTUPDONE can now be recognized without the need for new "live" log
+# TEST records from the master (under favorable conditions). The response to
+# TEST the ALL_REQ at the end of synchronization includes an end-of-log marker
+# TEST that now triggers it. However, the message containing that end marker
+# TEST could get lost, so live log records still serve as a back-up mechanism.
+# TEST The end marker may also be set under c2c sync, but only if the serving
+# TEST client has itself achieved STARTUPDONE.
+#
+proc rep034 { method { niter 2 } { tnum "034" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 3]
+ foreach l $logsets {
+ puts "Rep$tnum ($method $args): Test of\
+ startup synchronization detection $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client 0 logs are [lindex $l 1]"
+ puts "Rep$tnum: Client 1 logs are [lindex $l 2]"
+ rep034_sub $method $niter $tnum $l $args
+ }
+}
+
+# This test manages on its own the decision of whether or not to open an
+# environment with recovery. (It varies throughout the test.) Therefore there
+# is no need to run it twice (as we often do with a loop in the main proc).
+#
+proc rep034_sub { method niter tnum logset largs } {
+ global anywhere
+ global testdir
+ global databases_in_memory
+ global repfiles_in_memory
+ global startup_done
+ global rep_verbose
+ global verbose_type
+ global rep034_got_allreq
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set c2_logtype [lindex $logset 2]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+
+ # In first part of test master serves requests.
+ #
+ set anywhere 0
+
+ # Create a master; add some data.
+ #
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \
+ -event rep_event $verbargs -errpfx MASTER $repmemargs \
+ -home $masterdir -rep_master -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd]
+ puts "\tRep$tnum.a: Create master; add some data."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+
+ # Bring up a new client, and see that it can get STARTUPDONE with no new
+ # live transactions at the master.
+ #
+ puts "\tRep$tnum.b: Bring up client; check STARTUPDONE."
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ -event rep_event $verbargs -errpfx CLIENT $repmemargs \
+ -home $clientdir -rep_client -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd]
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ set startup_done 0
+ process_msgs $envlist
+
+ error_check_good done_without_live_txns \
+ [stat_field $clientenv rep_stat "Startup complete"] 1
+
+ # Test that the event got fired as well. In the rest of the test things
+ # get a little complex (what with having two clients), so only check the
+ # event part here. The important point is the various ways that
+ # STARTUPDONE can be computed, so testing the event firing mechanism
+ # just this once is enough.
+ #
+ error_check_good done_event_too $startup_done 1
+
+ #
+ # Bring up another client. Do additional new txns at master, ensure
+ # that STARTUPDONE is not triggered at NEWMASTER LSN.
+ #
+ puts "\tRep$tnum.c: Another client; no STARTUPDONE at NEWMASTER LSN."
+ set newmaster_lsn [next_expected_lsn $masterenv]
+ repladd 3
+ #
+ # !!! Please note that we're giving client2 a special customized version
+ # of the replication transport call-back function.
+ #
+ set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs $c2_logargs \
+ -event rep_event $verbargs -errpfx CLIENT2 $repmemargs \
+ -home $clientdir2 -rep_client -rep_transport \[list 3 rep034_send\]"
+ set client2env [eval $cl2_envcmd]
+
+ set envlist "{$masterenv 1} {$clientenv 2} {$client2env 3}"
+ set verified false
+ for {set i 0} {$i < 10} {incr i} {
+ proc_msgs_once $envlist
+ set client2lsn [next_expected_lsn $client2env]
+
+ # Get to the point where we've gone past where the master's LSN
+ # was at NEWMASTER time, and make sure we haven't yet gotten
+ # STARTUPDONE. Ten loop iterations should be plenty.
+ #
+ if {[$client2env log_compare $client2lsn $newmaster_lsn] > 0} {
+ if {![stat_field \
+ $client2env rep_stat "Startup complete"]} {
+ set verified true
+ }
+ break;
+ }
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ }
+ error_check_good no_newmaster_trigger $verified true
+
+ process_msgs $envlist
+ error_check_good done_during_live_txns \
+ [stat_field $client2env rep_stat "Startup complete"] 1
+
+ #
+ # From here on out we use client-to-client sync.
+ #
+ set anywhere 1
+
+ # Here we rely on recovery at client 1. If that client is running with
+ # in-memory logs or in-memory databases, forgo the remainder of the test.
+ #
+ if {$c_logtype eq "in-mem" || $databases_in_memory } {
+ puts "\tRep$tnum.d: Skip the rest of the test for\
+ in-memory logging or databases."
+ $masterenv close
+ $clientenv close
+ $client2env close
+ replclose $testdir/MSGQUEUEDIR
+ return
+ }
+
+ # Shut down client 1. Bring it back, with recovery. Verify that it can
+ # get STARTUPDONE by syncing to the other client, even with no new master
+ # txns.
+ #
+ puts "\tRep$tnum.d: Verify STARTUPDONE using c2c sync."
+ $clientenv close
+ set clientenv [eval $cl_envcmd -recover]
+ set envlist "{$masterenv 1} {$clientenv 2} {$client2env 3}"
+
+ # Clear counters at client2, so that we can check "Client service
+ # requests" in a moment.
+ #
+ $client2env rep_stat -clear
+ process_msgs $envlist
+ error_check_good done_via_c2c \
+ [stat_field $clientenv rep_stat "Startup complete"] 1
+ #
+ # Make sure our request was served by client2. This isn't a test of c2c
+ # sync per se, but if this fails it indicates that we're not really
+ # testing what we thought we were testing.
+ #
+ error_check_bad c2c_served_by_master \
+ [stat_field $client2env rep_stat "Client service requests"] 0
+
+ # Verify that we don't get STARTUPDONE if we are using c2c sync to
+ # another client and the serving client has not itself reached
+ # STARTUPDONE, because that suggests the serving client could be far
+ # behind. Verify also that we can still eventually get STARTUPDONE, as
+ # a fall-back, once the master starts generating new txns again.
+ #
+ # To do so, we'll need to restart both clients. Start with the client
+ # that will serve the request. Turn off "anywhere" processing for a
+ # moment so that we can get this client set up without having the
+ # other one running.
+ #
+ # Now it's client 2 that needs recovery. Forgo the rest of the test if
+ # it is logging in memory. (We could get this far in mixed mode, with
+ # client 1 logging on disk.)
+ #
+ if {$c2_logtype eq "in-mem"} {
+ puts "\tRep$tnum.e: Skip rest of test for in-memory logging."
+ $masterenv close
+ $clientenv close
+ $client2env close
+ replclose $testdir/MSGQUEUEDIR
+ return
+ }
+ puts "\tRep$tnum.e: Check no STARTUPDONE when c2c server is behind."
+ $clientenv log_flush
+ $clientenv close
+ $client2env log_flush
+ $client2env close
+
+ set anywhere 0
+ set client2env [eval $cl2_envcmd -recover]
+ set envlist "{$masterenv 1} {$client2env 3}"
+
+ # We want client2 to get partway through initialization, but once it
+ # sends the ALL_REQ to the master, we want to cut things off there.
+ # Recall that we gave client2 a special "wrapper" version of the
+ # replication transport call-back function: that function will set a
+ # flag when it sees an ALL_REQ message go by.
+ #
+ set rep034_got_allreq false
+ while { !$rep034_got_allreq } {
+ proc_msgs_once $envlist
+ }
+
+ #
+ # To make sure we're doing a valid test, verify that we really did
+ # succeed in getting the serving client into the state we intended.
+ #
+ error_check_good serve_from_notstarted \
+ [stat_field $client2env rep_stat "Startup complete"] 0
+
+ # Start up the client to be tested. Make sure it doesn't get
+ # STARTUPDONE (yet). Again, the checking of service request stats is
+ # just for test debugging, to make sure we have a valid test.
+ #
+ # To add insult to injury, not only do we not get STARTUPDONE from the
+ # "behind" client, we also don't even get all the log records we need
+ # (because we didn't allow client2's ALL_REQ to get to the master).
+ # There is no mechanism to let us know that. The only resolution is to wait
+ # for gap detection to rerequest (which would then go to the master).
+ # So, set a small rep_request upper bound, so that it doesn't take a ton
+ # of new live txns to reach the trigger.
+ #
+ set anywhere 1
+ $client2env rep_stat -clear
+ replclear 2
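+ # (replclear discards any messages still queued for EID 2, the
+ # client we just closed and are about to restart.)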
+ set clientenv [eval $cl_envcmd -recover]
+ #
+ # Set the rerequest time to 400 usecs. An average ping to localhost
+ # should take a few tens of usecs.
+ #
+ $clientenv rep_request 400 400
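+ # (The two rep_request arguments are the minimum and maximum
+ # rerequest intervals; both are set to the same small value here.)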
+ set envlist "{$masterenv 1} {$clientenv 2} {$client2env 3}"
+
+ # Here we're expecting that the master isn't generating any new log
+ # records, which is normally the case since we're not generating any new
+ # transactions there. This is important, because otherwise the client
+ # could notice its log gap and request the missing records, resulting in
+ # STARTUPDONE before we're ready for it. When debug_rop is on, just
+ # scanning the data-dir during UPDATE_REQ processing (which, remember,
+ # now happens just to check for potential NIMDB re-materialization)
+ # generates log records, as we open each file we find to see if it's a
+ # database. So, filter out LOG messages (simulating them being "lost")
+ # temporarily.
+ #
+ if {[is_substr [berkdb getconfig] "debug_rop"]} {
+ $masterenv rep_transport {1 rep034_send_nolog}
+ }
+ while {[rep034_proc_msgs_once $masterenv $clientenv $client2env] > 0} {}
+ $masterenv rep_transport {1 replsend}
+
+ error_check_good not_from_undone_c2c_client \
+ [stat_field $clientenv rep_stat "Startup complete"] 0
+
+ error_check_bad c2c_served_by_master \
+ [stat_field $client2env rep_stat "Client service requests"] 0
+
+ # Verify that we nevertheless *do* get STARTUPDONE after the master
+ # starts generating new txns again. Generate two sets of transactions,
+ # with an unmistakable pause between, to ensure that we trigger the
+ # client's rerequest timer, which we need in order to pick up the
+ # missing transactions. The 400 usec is a nice short time; but on
+ # Windows sometimes it's possible to blast through a single process_msgs
+ # cycle so quickly that its low-resolution timer reflects no elapsed
+ # time at all!
+ #
+ puts "\tRep$tnum.f: Check STARTUPDONE via fall-back to live txns."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+ tclsleep 1
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+ error_check_good fallback_live_txns \
+ [stat_field $clientenv rep_stat "Startup complete"] 1
+
+ $masterenv close
+ $clientenv close
+ $client2env close
+ replclose $testdir/MSGQUEUEDIR
+ set anywhere 0
+}
+
+# Do a round of message processing, but juggle things such that client2 can
+# never receive a message from the master.
+#
+# Assumes the usual "{$masterenv 1} {$clientenv 2} {$client2env 3}" structure.
+#
+proc rep034_proc_msgs_once { masterenv clientenv client2env } {
+ set nproced [proc_msgs_once "{$masterenv 1}" NONE err]
+ error_check_good pmonce_1 $err 0
+ replclear 3
+
+ incr nproced [proc_msgs_once "{$clientenv 2} {$client2env 3}" NONE err]
+ error_check_good pmonce_2 $err 0
+
+ return $nproced
+}
+
+# Wrapper for replsend. Mostly just a pass-through to the real replsend, except
+# we watch for an ALL_REQ, and just set a flag when we see it.
+#
+proc rep034_send { control rec fromid toid flags lsn } {
+ global rep034_got_allreq
+
+ if {[berkdb msgtype $control] eq "all_req"} {
+ set rep034_got_allreq true
+ }
+ return [replsend $control $rec $fromid $toid $flags $lsn]
+}
+
+# Another slightly different wrapper for replsend. This one simulates losing
+# any broadcast LOG messages from the master.
+#
+proc rep034_send_nolog { control rec fromid toid flags lsn } {
+ if {[berkdb msgtype $control] eq "log" &&
+ $fromid == 1 && $toid == -1} {
+ set result 0
+ } else {
+ set result [replsend $control $rec $fromid $toid $flags $lsn]
+ }
+ return $result
+}
diff --git a/db-4.8.30/test/rep035.tcl b/db-4.8.30/test/rep035.tcl
new file mode 100644
index 0000000..39666ef
--- /dev/null
+++ b/db-4.8.30/test/rep035.tcl
@@ -0,0 +1,294 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep035
+# TEST Test sync-up recovery in replication.
+# TEST
+# TEST We need to fork off 4 child tclsh processes to operate
+# TEST on Site 3's (client always) home directory:
+# TEST Process 1 continually calls lock_detect.
+# TEST Process 2 continually calls txn_checkpoint.
+# TEST Process 3 continually calls memp_trickle.
+# TEST Process 4 continually calls log_archive.
+# TEST Sites 1 and 2 will continually swap being master
+# TEST (forcing site 3 to continually run sync-up recovery)
+# TEST New master performs 1 operation, replicates and downgrades.
+
+proc rep035 { method { niter 100 } { tnum "035" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set saved_args $args
+ set logsets [create_logsets 3]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ foreach l $logsets {
+ set envargs ""
+ set args $saved_args
+ puts "Rep$tnum: Test sync-up recovery ($method) $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client 0 logs are [lindex $l 1]"
+ puts "Rep$tnum: Client 1 logs are [lindex $l 2]"
+ rep035_sub $method $niter $tnum $envargs $l $args
+ }
+}
+
+proc rep035_sub { method niter tnum envargs logset largs } {
+ source ./include.tcl
+ global testdir
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir1 $testdir/CLIENTDIR1
+ set clientdir2 $testdir/CLIENTDIR2
+
+ file mkdir $masterdir
+ file mkdir $clientdir1
+ file mkdir $clientdir2
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set c2_logtype [lindex $logset 2]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create $verbargs $repmemargs \
+ -log_max 1000000 $envargs -home $masterdir $m_logargs \
+ -errpfx MASTER -errfile /dev/stderr $m_txnargs -rep_master \
+ -rep_transport \[list 1 replsend\]"
+ set env1 [eval $env_cmd(M)]
+
+ # Open two clients
+ repladd 2
+ set env_cmd(C1) "berkdb_env_noerr -create $verbargs $repmemargs \
+ -log_max 1000000 $envargs -home $clientdir1 $c_logargs \
+ -errfile /dev/stderr -errpfx CLIENT $c_txnargs -rep_client \
+ -rep_transport \[list 2 replsend\]"
+ set env2 [eval $env_cmd(C1)]
+
+ # Second client needs lock_detect flag.
+ repladd 3
+ set env_cmd(C2) "berkdb_env_noerr -create $verbargs $repmemargs \
+ -log_max 1000000 $envargs -home $clientdir2 $c2_logargs \
+ -errpfx CLIENT2 -errfile /dev/stderr $c2_txnargs -rep_client \
+ -lock_detect default -rep_transport \[list 3 replsend\]"
+ set env3 [eval $env_cmd(C2)]
+ error_check_good client_env [is_valid_env $env3] TRUE
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$env1 1} {$env2 2} {$env3 3}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $env1 test force noarchive_timeout
+
+ # We need to fork off 4 child tclsh processes to operate
+ # on Site 3's (client always) home directory:
+ # Process 1 continually calls lock_detect (DB_LOCK_DEFAULT)
+ # Process 2 continually calls txn_checkpoint (DB_FORCE)
+ # Process 3 continually calls memp_trickle (large % like 90)
+ # Process 4 continually calls log_archive.
+
+ puts "\tRep$tnum.a: Fork child process running lock_detect on client2."
+ set pid1 [exec $tclsh_path $test_path/wrap.tcl \
+ rep035script.tcl $testdir/lock_detect.log \
+ $clientdir2 detect &]
+
+ puts "\tRep$tnum.b:\
+ Fork child process running txn_checkpoint on client2."
+ set pid2 [exec $tclsh_path $test_path/wrap.tcl \
+ rep035script.tcl $testdir/txn_checkpoint.log \
+ $clientdir2 checkpoint &]
+
+ puts "\tRep$tnum.c: Fork child process running memp_trickle on client2."
+ set pid3 [exec $tclsh_path $test_path/wrap.tcl \
+ rep035script.tcl $testdir/memp_trickle.log \
+ $clientdir2 trickle &]
+
+ puts "\tRep$tnum.d: Fork child process running log_archive on client2."
+ set pid4 [exec $tclsh_path $test_path/wrap.tcl \
+ rep035script.tcl $testdir/log_archive.log \
+ $clientdir2 archive &]
+
+ # Pause a bit to let the children get going.
+ tclsleep 5
+
+ set logfilelist [list lock_detect.log \
+ txn_checkpoint.log memp_trickle.log log_archive.log]
+ set pidlist [list $pid1 $pid2 $pid3 $pid4]
+
+ #
+ # Sites 1 and 2 will continually swap being master
+ # forcing site 3 to continually run sync-up recovery.
+ # New master performs 1 operation, replicates and downgrades.
+ # Site 3 will always stay a client.
+ #
+ # Set up all the master/client data we're going to need
+ # to keep track of and swap. Set up the handles for rep_test.
+ #
+
+ set masterenv $env1
+ set mid 1
+ set clientenv $env2
+ set cid 2
+
+ # Set up databases as in-memory or on-disk as specified.
+ if { $databases_in_memory } {
+ set testfile { "" "test$tnum.db" }
+ } else {
+ set testfile "test$tnum.db"
+ }
+
+ set args [convert_args $method]
+ set omethod [convert_method $method]
+ set mdb_cmd "{berkdb_open_noerr} -env $masterenv -auto_commit \
+ -create $omethod $args -mode 0644 $testfile"
+ set cdb_cmd "{berkdb_open_noerr} -env $clientenv -auto_commit \
+ $omethod $args -mode 0644 $testfile"
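+ # Keep both open commands as strings so the handles can be closed and
+ # re-evaled after each swap below; otherwise the stale handles would
+ # return HANDLE_DEAD once messages have been dropped to the new master.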
+
+ set masterdb [eval $mdb_cmd]
+ error_check_good dbopen [is_valid_db $masterdb] TRUE
+ process_msgs $envlist
+
+ set clientdb [eval $cdb_cmd]
+ error_check_good dbopen [is_valid_db $clientdb] TRUE
+
+ tclsleep 2
+ puts "\tRep$tnum.e: Swap master and client $niter times."
+ for { set i 0 } { $i < $niter } { incr i } {
+
+ # Do a few ops
+ eval rep_test $method $masterenv $masterdb 2 $i $i 0 $largs
+ set envlist "{$masterenv $mid} {$clientenv $cid} {$env3 3}"
+ process_msgs $envlist
+
+ # Do one op on master and process messages and drop
+ # to clientenv to force sync-up recovery next time.
+ eval rep_test $method $masterenv $masterdb 1 $i $i 0 $largs
+ set envlist "{$masterenv $mid} {$env3 3}"
+ replclear $cid
+ process_msgs $envlist
+
+ # Swap all the info we need.
+ set tmp $masterenv
+ set masterenv $clientenv
+ set clientenv $tmp
+
+ set tmp $masterdb
+ set masterdb $clientdb
+ set clientdb $tmp
+
+ set tmp $mid
+ set mid $cid
+ set cid $tmp
+
+ set tmp $mdb_cmd
+ set mdb_cmd $cdb_cmd
+ set cdb_cmd $tmp
+
+ puts "\tRep$tnum.e.$i: Swap: master $mid, client $cid"
+ error_check_good downgrade [$clientenv rep_start -client] 0
+ error_check_good upgrade [$masterenv rep_start -master] 0
+ set envlist "{$masterenv $mid} {$clientenv $cid} {$env3 3}"
+ process_msgs $envlist
+
+ # Close old and reopen since we will get HANDLE_DEAD
+ # otherwise because we dropped messages to the new master.
+ error_check_good masterdb [$masterdb close] 0
+ error_check_good clientdb [$clientdb close] 0
+
+ set masterdb [eval $mdb_cmd]
+ error_check_good dbopen [is_valid_db $masterdb] TRUE
+
+ set clientdb [eval $cdb_cmd]
+ error_check_good dbopen [is_valid_db $clientdb] TRUE
+ process_msgs $envlist
+ }
+
+ # Communicate with child processes by creating a marker file.
+ set markerenv [berkdb_env_noerr -create -home $testdir -txn]
+ error_check_good markerenv_open [is_valid_env $markerenv] TRUE
+ set marker [eval "berkdb_open_noerr \
+ -create -btree -auto_commit -env $markerenv marker.db"]
+ error_check_good marker_close [$marker close] 0
+
+ # Wait for child processes; they should shut down quickly.
+ watch_procs $pidlist 1
+
+ # There should not be any messages in the log files.
+ # If there are, print them out.
+ foreach file $logfilelist {
+ puts "\tRep$tnum.f: Checking $file for errors."
+ set fd [open $testdir/$file r]
+ while { [gets $fd str] != -1 } {
+ error "FAIL: found message $str"
+ }
+ }
+
+ error_check_good masterdb [$masterdb close] 0
+ error_check_good clientdb [$clientdb close] 0
+ error_check_good env1_close [$env1 close] 0
+ error_check_good env2_close [$env2 close] 0
+ error_check_good env3_close [$env3 close] 0
+ error_check_good markerenv_close [$markerenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep035script.tcl b/db-4.8.30/test/rep035script.tcl
new file mode 100644
index 0000000..c8fc351
--- /dev/null
+++ b/db-4.8.30/test/rep035script.tcl
@@ -0,0 +1,81 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Rep035 script - continually calls lock_detect, txn_checkpoint,
+# mpool_trickle, or log_archive.
+#
+# Usage: repscript clientdir apicall
+# clientdir: client env directory
+# apicall: detect, checkpoint, trickle, or archive.
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+source $test_path/reputils.tcl
+
+set usage "repscript clientdir apicall"
+
+# Verify usage
+if { $argc != 2 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set clientdir [ lindex $argv 0 ]
+set apicall [ lindex $argv 1 ]
+
+# Join the queue env. We assume the rep test convention of
+# placing the messages in $testdir/MSGQUEUEDIR.
+set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR]
+error_check_good script_qenv_open [is_valid_env $queueenv] TRUE
+
+# Join the client env.
+repladd 3
+set envid 3
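+ # (Use the same EID, 3, that the parent test assigned to this
+ # client's environment, so replsend routes our messages consistently.)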
+set cl2_cmd "berkdb_env_noerr -home $clientdir \
+ -errfile /dev/stderr -errpfx CLIENT.$apicall \
+ -txn -rep_client -rep_transport \[list $envid replsend\]"
+# set cl2_cmd "berkdb_env_noerr -home $clientdir \
+# -errfile /dev/stderr -errpfx CLIENT.$apicall \
+# -verbose {rep on} \
+# -txn -rep_client -rep_transport \[list $envid replsend\]"
+set clientenv [eval $cl2_cmd]
+error_check_good script_c2env_open [is_valid_env $clientenv] TRUE
+
+# Run chosen call continuously until the parent script creates
+# a marker file to indicate completion.
+switch -exact -- $apicall {
+ archive {
+ while { [file exists $testdir/marker.db] == 0 } {
+ $clientenv log_archive -arch_remove
+# tclsleep 1
+ }
+ }
+ detect {
+ while { [file exists $testdir/marker.db] == 0 } {
+ $clientenv lock_detect default
+# tclsleep 1
+ }
+ }
+ checkpoint {
+ while { [file exists $testdir/marker.db] == 0 } {
+ $clientenv txn_checkpoint -force
+ tclsleep 1
+ }
+ }
+ trickle {
+ while { [file exists $testdir/marker.db] == 0 } {
+ $clientenv mpool_trickle 90
+# tclsleep 1
+ }
+ }
+ default {
+ puts "FAIL: unrecognized API call $apicall
+ }
+}
+
+error_check_good clientenv_close [$clientenv close] 0
+
diff --git a/db-4.8.30/test/rep036.tcl b/db-4.8.30/test/rep036.tcl
new file mode 100644
index 0000000..e8ce36b
--- /dev/null
+++ b/db-4.8.30/test/rep036.tcl
@@ -0,0 +1,209 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep036
+# TEST Multiple master processes writing to the database.
+# TEST One process handles all message processing.
+
+proc rep036 { method { niter 200 } { tnum "036" } args } {
+
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for btree only.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: Skipping for method $method."
+ return
+ }
+
+ set saved_args $args
+ set logsets [create_logsets 3]
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ foreach l $logsets {
+ set envargs ""
+ set args $saved_args
+ puts "Rep$tnum: Test sync-up recovery ($method) $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client 0 logs are [lindex $l 1]"
+ puts "Rep$tnum: Client 1 logs are [lindex $l 2]"
+ rep036_sub $method $niter $tnum $envargs $l $args
+ }
+}
+
+proc rep036_sub { method niter tnum envargs logset args } {
+ source ./include.tcl
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer.
+ # We always run this test with -txn, so don't adjust txnargs.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create $verbargs $repmemargs \
+ -log_max 1000000 $envargs -home $masterdir $m_logargs \
+ -errpfx MASTER -errfile /dev/stderr -txn -rep_master \
+ -rep_transport \[list 1 replsend\]"
+ set env1 [eval $env_cmd(M)]
+
+ # Open a client
+ repladd 2
+ set env_cmd(C) "berkdb_env_noerr -create $verbargs $repmemargs \
+ -log_max 1000000 $envargs -home $clientdir $c_logargs \
+ -errfile /dev/stderr -errpfx CLIENT -txn -rep_client \
+ -rep_transport \[list 2 replsend\]"
+ set env2 [eval $env_cmd(C)]
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$env1 1} {$env2 2}"
+ process_msgs $envlist
+
+# # Start up deadlock detector.
+# # Commented out, as are two more sections below - see [#15049].
+# set dpid [eval {exec $util_path/db_deadlock} \
+# -a o -v -t 2.0 -h $masterdir >& $testdir/dd.parent.out &]
+
+ # Set up master database.
+ set testfile "rep$tnum.db"
+ set omethod [convert_method $method]
+ set mdb [eval {berkdb_open_noerr} -env $env1 -auto_commit \
+ -create -mode 0644 $omethod $testfile]
+ error_check_good dbopen [is_valid_db $mdb] TRUE
+
+ # Put a record in the master database.
+ set key MAIN_KEY
+ set string MAIN_STRING
+ set t [$env1 txn]
+ error_check_good txn [is_valid_txn $t $env1] TRUE
+ set txn "-txn $t"
+
+ set ret [eval \
+ {$mdb put} $txn {$key [chop_data $method $string]}]
+ error_check_good mdb_put $ret 0
+ error_check_good txn_commit [$t commit] 0
+
+ # Fork two writers that write to the master.
+ set pidlist {}
+ foreach writer { 1 2 } {
+ puts "\tRep$tnum.a: Fork child process WRITER$writer."
+ set pid [exec $tclsh_path $test_path/wrap.tcl \
+ rep036script.tcl $testdir/rep036script.log.$writer \
+ $masterdir $writer $niter btree &]
+ lappend pidlist $pid
+ }
+
+ # Run the main loop until the writers signal completion.
+ set i 0
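+ # (Each writer signals completion by creating a marker database,
+ # 1.db or 2.db; see the end of rep036script.tcl.)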
+ while { [file exists $testdir/1.db] == 0 && \
+ [file exists $testdir/2.db] == 0 } {
+ set string MAIN_STRING.$i
+
+ set t [$env1 txn]
+ error_check_good txn [is_valid_txn $t $env1] TRUE
+ set txn "-txn $t"
+ set ret [eval \
+ {$mdb put} $txn {$key [chop_data $method $string]}]
+
+# # Writing to this database can deadlock. If we do, let the
+# # deadlock detector break the lock, wait a second, and try again.
+# while { [catch {eval {$mdb put}\
+# $txn {$key [chop_data $method $string]}} ret] } {
+# # Make sure the failure is a deadlock.
+# error_check_good deadlock [is_substr $ret DB_LOCK_DEADLOCK] 1
+# tclsleep 1
+# }
+
+
+ error_check_good mdb_put $ret 0
+ error_check_good txn_commit [$t commit] 0
+
+ if { [expr $i % 10] == 0 } {
+ puts "\tRep036.c: Wrote MAIN record $i"
+ }
+ incr i
+
+ # Process messages.
+ process_msgs $envlist
+
+ # Wait a while, then do it all again.
+ tclsleep 1
+ }
+
+
+ # Confirm that the writers are done and process the messages
+ # once more to be sure the client is caught up.
+ watch_procs $pidlist 1
+ process_msgs $envlist
+
+# # We are done with the deadlock detector.
+# error_check_good kill_deadlock_detector [tclkill $dpid] ""
+
+ puts "\tRep$tnum.c: Verify logs and databases"
+ # Check that master and client logs and dbs are identical.
+ # Logs first ...
+ set stat [catch {eval exec $util_path/db_printlog \
+ -h $masterdir > $masterdir/prlog} result]
+ error_check_good stat_mprlog $stat 0
+ set stat [catch {eval exec $util_path/db_printlog \
+ -h $clientdir > $clientdir/prlog} result]
+ error_check_good mdb [$mdb close] 0
+ error_check_good stat_cprlog $stat 0
+# error_check_good log_cmp \
+# [filecmp $masterdir/prlog $clientdir/prlog] 0
+
+ # ... now the databases.
+ set db1 [eval {berkdb_open_noerr -env $env1 -rdonly $testfile}]
+ set db2 [eval {berkdb_open_noerr -env $env2 -rdonly $testfile}]
+
+ error_check_good comparedbs [db_compare \
+ $db1 $db2 $masterdir/$testfile $clientdir/$testfile] 0
+ error_check_good db1_close [$db1 close] 0
+ error_check_good db2_close [$db2 close] 0
+
+ error_check_good env1_close [$env1 close] 0
+ error_check_good env2_close [$env2 close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep036script.tcl b/db-4.8.30/test/rep036script.tcl
new file mode 100644
index 0000000..fd40822
--- /dev/null
+++ b/db-4.8.30/test/rep036script.tcl
@@ -0,0 +1,125 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Rep036 script - create additional writers in master env.
+#
+# Usage: repscript masterdir writerid nentries method
+# masterdir: Directory of replication master
+# writerid: ID number for this writer
+# nentries: Number of entries to write
+# method: Access method
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+source $test_path/reputils.tcl
+
+global rand_init
+set usage "repscript masterdir writerid nentries method"
+
+# Verify usage
+if { $argc != 4 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set masterdir [ lindex $argv 0 ]
+set writerid [ lindex $argv 1 ]
+set nentries [ lindex $argv 2 ]
+set method [ lindex $argv 3 ]
+
+# Join the queue env. We assume the rep test convention of
+# placing the messages in $testdir/MSGQUEUEDIR.
+set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR]
+error_check_good script_qenv_open [is_valid_env $queueenv] TRUE
+
+# We need to set up our own machid.
+repladd 1
+repladd 2
+
+# Start up deadlock detector.
+# Commented out - see #15049.
+#set dpid [eval {exec $util_path/db_deadlock} \
+# -a o -v -t 2.0 -h $masterdir >& $testdir/dd.writer.$writerid.out &]
+
+# Join the master env.
+set envid 1
+set env_cmd "berkdb_env_noerr -home $masterdir \
+ -errfile /dev/stderr -errpfx WRITER.$writerid \
+ -txn -rep_master -rep_transport \[list $envid replsend\]"
+# set env_cmd "berkdb_env_noerr -home $masterdir \
+# -errfile /dev/stderr -errpfx WRITER.$writerid \
+# -verbose {rep on} \
+# -txn -rep_master -rep_transport \[list $envid replsend\]"
+set masterenv [eval $env_cmd]
+error_check_good script_env_open [is_valid_env $masterenv] TRUE
+
+# Open database.
+set testfile "rep036.db"
+set omethod [convert_method $method]
+set mdb [eval {berkdb_open_noerr} -env $masterenv -auto_commit \
+ -create $omethod $testfile]
+error_check_good dbopen [is_valid_db $mdb] TRUE
+
+# Write records to the database.
+set did [open $dict]
+set count 0
+set dictsize 10000
+berkdb srand $rand_init
+while { $count < $nentries } {
+ #
+ # If nentries exceeds the dictionary size, close
+ # and reopen to start from the beginning again.
+ if { [expr [expr $count + 1] % $dictsize] == 0 } {
+ close $did
+ set did [open $dict]
+ }
+
+ gets $did str
+ set key WRITER.$writerid.$str
+ set str [reverse $str]
+
+ set t [$masterenv txn]
+ error_check_good txn [is_valid_txn $t $masterenv] TRUE
+ set txn "-txn $t"
+
+# If using deadlock detection, uncomment this and comment the
+# following put statement.
+# # Writing to this database can deadlock. If we do, let the
+# # deadlock detector break the lock, wait a second, and try again.
+# while { [catch {eval {$mdb put}\
+# $txn {$key [chop_data $method $str]}} ret] } {
+# error_check_good deadlock [is_substr $ret DB_LOCK_DEADLOCK] 1
+# tclsleep 1
+# }
+
+ set ret [eval \
+ {$mdb put} $txn {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ if { [expr $count % 100] == 1 } {
+ puts "Wrote WRITER.$writerid record $count"
+ set sleep [berkdb random_int 0 10]
+ puts "Writer.$writerid sleeping $sleep seconds"
+ tclsleep $sleep
+ }
+ incr count
+}
+close $did
+
+# Clean up.
+# Uncomment following line if using deadlock detector.
+#error_check_good kill_deadlock_detector [tclkill $dpid] ""
+error_check_good mdb_close [$mdb close] 0
+error_check_good masterenv_close [$masterenv close] 0
+replclose $testdir/MSGQUEUEDIR
+
+# Communicate with parent by creating a marker file.
+set markerenv [berkdb_env -create -home $testdir -txn]
+error_check_good markerenv_open [is_valid_env $markerenv] TRUE
+set marker [eval "berkdb_open \
+ -create -btree -auto_commit -env $markerenv $writerid.db"]
+error_check_good marker_close [$marker close] 0
+error_check_good markerenv_close [$markerenv close] 0
diff --git a/db-4.8.30/test/rep037.tcl b/db-4.8.30/test/rep037.tcl
new file mode 100644
index 0000000..df574d8
--- /dev/null
+++ b/db-4.8.30/test/rep037.tcl
@@ -0,0 +1,248 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep037
+# TEST Test of internal initialization and page throttling.
+# TEST
+# TEST One master, one client, force page throttling.
+# TEST Generate several log files.
+# TEST Remove old master log files.
+# TEST Delete client files and restart client.
+# TEST Put one more record to the master.
+# TEST Verify page throttling occurred.
+#
+proc rep037 { method { niter 1500 } { tnum "037" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set saved_args $args
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery, with and
+ # without cleaning, and with bulk transfer.
+ set cleanopts { bulk clean noclean }
+ foreach r $test_recopts {
+ foreach c $cleanopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ set args $saved_args
+ puts "Rep$tnum ($method $c $r $args): Test of\
+ internal init with page throttling $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep037_sub $method $niter $tnum $l $r $c $args
+ }
+ }
+ }
+}
+
+proc rep037_sub { method niter tnum logset recargs clean largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ #
+ # If using bulk processing, just use clean. We could add
+ # another control loop to do bulk+clean and then bulk+noclean
+ # but that seems like overkill.
+ #
+ set bulk 0
+ if { $clean == "bulk" } {
+ set bulk 1
+ set clean "clean"
+ }
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $repmemargs \
+ $m_logargs -log_max $log_max -errpfx MASTER $verbargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+ $masterenv rep_limit 0 [expr 32 * 1024]
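+ # The small 32KB limit forces the master to throttle transmissions
+ # during internal init; step .g below verifies that the
+ # "Transmission limited" statistic is non-zero.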
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $repmemargs \
+ $c_logargs -log_max $log_max -errpfx CLIENT $verbargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ if { $bulk } {
+ error_check_good set_bulk [$masterenv rep_config {bulk on}] 0
+ }
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ # Run rep_test in the master (and update client).
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Close client."
+ if { $c_logtype != "in-memory" } {
+ set res [eval exec $util_path/db_archive -l -h $clientdir]
+ }
+ set last_client_log [get_logfile $clientenv last]
+ error_check_good client_close [$clientenv close] 0
+
+ set stop 0
+ while { $stop == 0 } {
+ # Run rep_test in the master (don't update client).
+ puts "\tRep$tnum.c: Running rep_test in replicated env."
+ eval rep_test \
+ $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ replclear 2
+
+ puts "\tRep$tnum.d: Run db_archive on master."
+ if { $m_logtype != "in-memory"} {
+ set res \
+ [eval exec $util_path/db_archive -d -h $masterdir]
+ }
+ # Make sure that we have a gap between the last client
+ # log and the first master log.
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > $last_client_log } {
+ set stop 1
+ }
+ }
+
+ puts "\tRep$tnum.e: Reopen client ($clean)."
+ if { $clean == "clean" } {
+ env_cleanup $clientdir
+ }
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist 0 NONE err
+ if { $clean == "noclean" } {
+ puts "\tRep$tnum.e.1: Trigger log request"
+ #
+ # When we don't clean, starting the client doesn't
+ # trigger any events. We need to generate some log
+ # records so that the client requests the missing
+ # logs and that will trigger it.
+ #
+ set entries 10
+ eval rep_test \
+ $method $masterenv NULL $entries $start $start 0 $largs
+ incr start $entries
+ process_msgs $envlist 0 NONE err
+ }
+
+ puts "\tRep$tnum.f: Verify logs and databases"
+ set verify_subset \
+ [expr { $m_logtype == "in-memory" || $c_logtype == "in-memory" }]
+ rep_verify $masterdir $masterenv\
+ $clientdir $clientenv $verify_subset 1 1
+
+ puts "\tRep$tnum.g: Verify throttling."
+ if { $niter > 1000 } {
+ set nthrottles \
+ [stat_field $masterenv rep_stat "Transmission limited"]
+ error_check_bad nthrottles $nthrottles -1
+ error_check_bad nthrottles $nthrottles 0
+ }
+
+ # Make sure log files are on-disk or not as expected.
+ check_log_location $masterenv
+ check_log_location $clientenv
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep038.tcl b/db-4.8.30/test/rep038.tcl
new file mode 100644
index 0000000..5b97d7c
--- /dev/null
+++ b/db-4.8.30/test/rep038.tcl
@@ -0,0 +1,293 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep038
+# TEST Test of internal initialization and ongoing master updates.
+# TEST
+# TEST One master, one client.
+# TEST Generate several log files.
+# TEST Remove old master log files.
+# TEST Delete client files and restart client.
+# TEST Put more records on master while initialization is in progress.
+#
+proc rep038 { method { niter 200 } { tnum "038" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery, and with two
+ # test options: forcing an archive during the middle of init, and
+ # normal operation.
+ # Skip recovery with in-memory logging - it doesn't make sense.
+ set testopts { normal archive }
+ foreach r $test_recopts {
+ foreach t $testopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $t $r $args): Test of\
+ internal init with new records $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep038_sub $method $niter $tnum $l $r $t $args
+ }
+ }
+ }
+}
+
+proc rep038_sub { method niter tnum logset recargs testopt largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $repmemargs \
+ $m_logargs -log_max $log_max -errpfx MASTER $verbargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+ $masterenv rep_limit 0 0
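+ # A zero rep_limit turns off transmission throttling for this test.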
+
+ # Run rep_test in the master only.
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set start 0
+ if { $databases_in_memory } {
+ set testfile { "" "test.db" }
+ } else {
+ set testfile "test.db"
+ }
+ set omethod [convert_method $method]
+ set dbargs [convert_args $method $largs]
+ set mdb [eval {berkdb_open} -env $masterenv -auto_commit\
+ -create -mode 0644 $omethod $dbargs $testfile ]
+ error_check_good reptest_db [is_valid_db $mdb] TRUE
+
+ set stop 0
+ while { $stop == 0 } {
+ # Run rep_test in the master beyond the first log file.
+ eval rep_test\
+ $method $masterenv $mdb $niter $start $start 0 $largs
+ incr start $niter
+
+ puts "\tRep$tnum.a.1: Run db_archive on master."
+ if { $m_logtype == "on-disk" } {
+ set res \
+ [eval exec $util_path/db_archive -d -h $masterdir]
+ }
+ #
+ # Make sure we have moved beyond the first log file.
+ #
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > 1 } {
+ set stop 1
+ }
+
+ }
+
+ puts "\tRep$tnum.b: Open client."
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $repmemargs \
+ $c_logargs -log_max $log_max -errpfx CLIENT $verbargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ $clientenv rep_limit 0 0
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ #
+ # We want to simulate a master continually getting new
+ # records while an update is going on. Simulate that
+ # for several iterations and then let the messages finish
+ # all their processing.
+ #
+ set loop 10
+ set i 0
+ set entries 100
+ set archived 0
+ set start $niter
+ set init 0
+ while { $i < $loop } {
+ set nproced 0
+ set start [expr $start + $entries]
+ eval rep_test \
+ $method $masterenv $mdb $entries $start $start 0 $largs
+ incr start $entries
+ incr nproced [proc_msgs_once $envlist NONE err]
+ error_check_bad nproced $nproced 0
+ #
+ # If we are testing archiving, we need to make sure that
+ # the first_lsn for internal init (the last log file we
+ # have when we first enter init) is no longer available.
+ # So, the first time through we record init_log, and then
+ # on subsequent iterations we'll wait for the last log
+ # to move further. Force a checkpoint and archive.
+ #
+ if { $testopt == "archive" && $archived == 0 } {
+ set clstat [exec $util_path/db_stat \
+ -N -r -R A -h $clientdir]
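+ # (The db_stat output is scanned for the client's internal-init
+ # recovery flags, REP_F_RECOVER_PAGE and REP_F_RECOVER_LOG, below.)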
+ if { $init == 0 && \
+ [is_substr $clstat "REP_F_RECOVER_PAGE"] } {
+ set init_log [get_logfile $masterenv last]
+ set init 1
+ }
+ if { $init == 0 && \
+ [is_substr $clstat "REP_F_RECOVER_LOG"] } {
+ set init_log [get_logfile $masterenv last]
+ set init 1
+ }
+ set last_master_log [get_logfile $masterenv last]
+ set first_master_log [get_logfile $masterenv first]
+ if { $init && $m_logtype == "on-disk" && \
+ $last_master_log > $init_log } {
+ $masterenv txn_checkpoint -force
+ $masterenv test force noarchive_timeout
+ set res [eval exec $util_path/db_archive \
+ -d -h $masterdir]
+ set newlog [get_logfile $masterenv first]
+ set archived 1
+ error_check_good logs \
+ [expr $newlog > $init_log] 1
+ } elseif { $init && $m_logtype == "in-memory" && \
+ $first_master_log > $init_log } {
+ $masterenv txn_checkpoint -force
+ $masterenv test force noarchive_timeout
+ set archived 1
+ }
+ }
+ incr i
+ }
+ set cdb [eval {berkdb_open_noerr} -env $clientenv -auto_commit\
+ -create -mode 0644 $omethod $dbargs $testfile]
+ error_check_good reptest_db [is_valid_db $cdb] TRUE
+ process_msgs $envlist
+
+ puts "\tRep$tnum.c: Verify logs and databases"
+ if { $databases_in_memory } {
+ rep038_verify_inmem $masterenv $clientenv $mdb $cdb
+ } else {
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1
+ }
+
+ # Add records to the master and update client.
+ puts "\tRep$tnum.d: Add more records and check again."
+ eval rep_test $method $masterenv $mdb $entries $start $start 0 $largs
+ incr start $entries
+ process_msgs $envlist 0 NONE err
+ if { $databases_in_memory } {
+ rep038_verify_inmem $masterenv $clientenv $mdb $cdb
+ } else {
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1
+ }
+
+ # Make sure log files are on-disk or not as expected.
+ check_log_location $masterenv
+ check_log_location $clientenv
+
+ error_check_good mdb_close [$mdb close] 0
+ error_check_good cdb_close [$cdb close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep038_verify_inmem { masterenv clientenv mdb cdb } {
+ #
+ # Can't use rep_verify to compare the logs because each
+ # commit record from db_printlog shows the database name
+ # as text on the master and as the file uid on the client
+ # because the client cannot find the "file".
+ #
+ # !!! Check the LSN first. Otherwise the DB->stat for the
+ # number of records will write a log record on the master if
+ # the build is configured for debug_rop. Work around that issue.
+ #
+ set mlsn [next_expected_lsn $masterenv]
+ set clsn [next_expected_lsn $clientenv]
+ error_check_good lsn $mlsn $clsn
+
+ set mrecs [stat_field $mdb stat "Number of records"]
+ set crecs [stat_field $cdb stat "Number of records"]
+ error_check_good recs $mrecs $crecs
+}
diff --git a/db-4.8.30/test/rep039.tcl b/db-4.8.30/test/rep039.tcl
new file mode 100644
index 0000000..552e2d3
--- /dev/null
+++ b/db-4.8.30/test/rep039.tcl
@@ -0,0 +1,472 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep039
+# TEST Test of interrupted internal initialization. The
+# TEST interruption is due to a changed master, or the client crashing,
+# TEST or both.
+# TEST
+# TEST One master, two clients.
+# TEST Generate several log files. Remove old master log files.
+# TEST Restart client, optionally having "cleaned" client env dir. Either
+# TEST way, this has the effect of forcing an internal init.
+# TEST Interrupt the internal init.
+# TEST Vary the number of times we process messages to make sure
+# TEST the interruption occurs at varying stages of the first internal
+# TEST initialization.
+# TEST
+# TEST Run for btree and queue only because of the number of permutations.
+# TEST
+proc rep039 { method { niter 200 } { tnum "039" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ # Run for btree and queue methods only.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_btree $method] == 1 || \
+ [is_queue $method] == 1 } {
+ lappend test_methods $method
+ }
+ }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 && [is_queue $method] == 0 } {
+ puts "Rep$tnum: skipping for non-btree, non-queue method."
+ return
+ }
+
+ # Skip for mixed-mode logging -- this test has a very large
+ # set of iterations already.
+ global mixed_mode_logging
+ if { $mixed_mode_logging > 0 } {
+ puts "Rep$tnum: Skipping for mixed mode logging."
+ return
+ }
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery,
+ # and with and without cleaning.
+ set cleanopts { noclean clean }
+ set archopts { archive noarchive }
+ set nummsgs 4
+ set announce {puts "Rep$tnum ($method $r $clean $a $crash $l $args):\
+ Test of internal init. $i message iters. \
+ Test $cnt of $maxtest tests $with recovery $msg $msg2."}
+ foreach r $test_recopts {
+ if { $r == "-recover" && ! $is_windows_test && ! $is_hp_test } {
+ set crashopts { master_change client_crash both }
+ } else {
+ set crashopts { master_change }
+ }
+ # Only one of the three sites in the replication group needs to
+ # be tested with in-memory logs: the "client under test".
+ #
+ if { $r == "-recover" } {
+ set cl_logopts { on-disk }
+ set with "with"
+ } else {
+ set cl_logopts { on-disk in-memory }
+ set with "without"
+ }
+ set maxtest [expr [llength $crashopts] * \
+ [llength $cleanopts] * \
+ [llength $archopts] * \
+ [llength $cl_logopts] * \
+ [expr $nummsgs]]
+ set cnt 1
+ foreach crash $crashopts {
+ foreach clean $cleanopts {
+ foreach a $archopts {
+ foreach l $cl_logopts {
+ for { set i 1 } \
+ { $i <= $nummsgs } \
+ { incr i } {
+ eval $announce
+ rep039_sub $method \
+ $niter $tnum $r \
+ $clean $a $crash \
+ $l $i $args
+ incr cnt
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+proc rep039_sub \
+ { method niter tnum recargs clean archive crash cl_logopt pmsgs largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set master_change false
+ set client_crash false
+ if { $crash == "master_change" } {
+ set master_change true
+ } elseif { $crash == "client_crash" } {
+ set client_crash true
+ } elseif { $crash == "both" } {
+ set master_change true
+ set client_crash true
+ } else {
+ error "FAIL:[timestamp] '$crash' is an unrecognized crash type"
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ # This test has three replication sites: a master, a client whose
+ # behavior is under test, and another client. We'll call them
+ # "A", "B" and "C". At one point during the test, we may (depending on
+ # the setting of $master_change) switch roles between the master and the
+ # other client.
+ #
+ # The initial site/role assignments are as follows:
+ #
+ # A = master
+ # B = client under test
+ # C = other client
+ #
+ # In the case where we do switch roles, the roles become:
+ #
+ # A = other client
+ # B = client under test (no change here)
+ # C = master
+ #
+ # Although the real names are A, B, and C, we'll use mnemonic names
+ # whenever possible. In particular, this means that we'll have to
+ # re-jigger the mnemonic names after the role switch.
+
+ file mkdir [set dirs(A) $testdir/SITE_A]
+ file mkdir [set dirs(B) $testdir/SITE_B]
+ file mkdir [set dirs(C) $testdir/SITE_C]
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_buf [expr $pagesize * 2]
+ set log_max [expr $log_buf * 4]
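+ # (With a 4096-byte pagesize this gives an 8KB log buffer and a
+ # 32KB log_max, satisfying the 4x requirement noted above.)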
+
+ # Set up the three sites: A, B, and C will correspond to EID's
+ # 1, 2, and 3 in the obvious way. As we start out, site A is always the
+ # master.
+ #
+ repladd 1
+ set env_A_cmd "berkdb_env_noerr -create -txn nosync \
+ $verbargs $repmemargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx SITE_A \
+ -home $dirs(A) -rep_transport \[list 1 replsend\]"
+ set envs(A) [eval $env_A_cmd $recargs -rep_master]
+
+ # Open a client
+ repladd 2
+ set txn_arg [adjust_txnargs $cl_logopt]
+ set log_arg [adjust_logargs $cl_logopt]
+ if { $cl_logopt == "on-disk" } {
+ # Override in this case, because we want to specify log_buffer.
+ set log_arg "-log_buffer $log_buf"
+ }
+ set env_B_cmd "berkdb_env_noerr -create $txn_arg \
+ $verbargs $repmemargs \
+ $log_arg -log_max $log_max -errpfx SITE_B \
+ -home $dirs(B) -rep_transport \[list 2 replsend\]"
+ set envs(B) [eval $env_B_cmd $recargs -rep_client]
+
+ # Open 2nd client
+ repladd 3
+ set env_C_cmd "berkdb_env_noerr -create -txn nosync \
+ $verbargs $repmemargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx SITE_C \
+ -home $dirs(C) -rep_transport \[list 3 replsend\]"
+ set envs(C) [eval $env_C_cmd $recargs -rep_client]
+
+ # Turn off throttling for this test.
+ foreach site [array names envs] {
+ $envs($site) rep_limit 0 0
+ }
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$envs(A) 1} {$envs(B) 2} {$envs(C) 3}"
+ process_msgs $envlist
+
+ # Set up the (indirect) mnemonic role names for the first part of the
+ # test.
+ set master A
+ set test_client B
+ set other C
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $envs($master) test force noarchive_timeout
+
+ # Run rep_test in the master (and update client).
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ eval rep_test $method $envs($master) NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Close client."
+ error_check_good client_close [$envs($test_client) close] 0
+
+ set res [eval exec $util_path/db_archive -l -h $dirs($test_client)]
+ set last_client_log [lindex [lsort $res] end]
+
+ set stop 0
+ while { $stop == 0 } {
+ # Run rep_test in the master (don't update client).
+ puts "\tRep$tnum.c: Running rep_test in replicated env."
+ eval rep_test $method $envs($master) NULL $niter 0 0 0 $largs
+ #
+ # Clear messages for first client. We want that site
+ # to get far behind.
+ #
+ replclear 2
+ puts "\tRep$tnum.d: Run db_archive on master."
+ set res [eval exec $util_path/db_archive -d -h $dirs($master)]
+ set res [eval exec $util_path/db_archive -l -h $dirs($master)]
+ if { [lsearch -exact $res $last_client_log] == -1 } {
+ set stop 1
+ }
+ }
+
+ set envlist "{$envs($master) 1} {$envs($other) 3}"
+ process_msgs $envlist
+
+ if { $archive == "archive" } {
+ puts "\tRep$tnum.d: Run db_archive on other client."
+ set res [eval exec $util_path/db_archive -l -h $dirs($other)]
+ error_check_bad \
+ log.1.present [lsearch -exact $res log.0000000001] -1
+ set res [eval exec $util_path/db_archive -d -h $dirs($other)]
+ set res [eval exec $util_path/db_archive -l -h $dirs($other)]
+ error_check_good \
+ log.1.gone [lsearch -exact $res log.0000000001] -1
+ } else {
+ puts "\tRep$tnum.d: Skipping db_archive on other client."
+ }
+
+ puts "\tRep$tnum.e: Reopen test client ($clean)."
+ if { $clean == "clean" } {
+ env_cleanup $dirs($test_client)
+ }
+
+ # (The test client is always site B, EID 2.)
+ #
+ set envs(B) [eval $env_B_cmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $envs(B)] TRUE
+ $envs(B) rep_limit 0 0
+
+ # Hold an open database handle while doing internal init, to make sure
+ # no bad lock interactions are happening. But only do so some of the
+ # time, and of course only if it's reasonable to expect the database to
+ # exist at this point. (It won't, if we're using in-memory databases
+ # and we've just started the client with recovery, since recovery blows
+ # away the mpool.) Set up database as in-memory or on-disk first.
+ #
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ set have_db [expr {$recargs != "-recover"}]
+ } else {
+ set dbname "test.db"
+ set have_db true
+ }
+
+ if {$clean == "noclean" && $have_db && [berkdb random_int 0 1] == 1} {
+ puts "\tRep$tnum.g: Hold open db handle from client app."
+ set cdb [eval\
+ {berkdb_open_noerr -env} $envs($test_client) $dbname]
+ error_check_good dbopen [is_valid_db $cdb] TRUE
+ set ccur [$cdb cursor]
+ error_check_good curs [is_valid_cursor $ccur $cdb] TRUE
+ set ret [$ccur get -first]
+ set kd [lindex $ret 0]
+ set key [lindex $kd 0]
+ error_check_good cclose [$ccur close] 0
+ } else {
+ puts "\tRep$tnum.g: (No client app handle will be held.)"
+ set cdb "NONE"
+ }
+
+ set envlist "{$envs(A) 1} {$envs(B) 2} {$envs(C) 3}"
+ proc_msgs_once $envlist
+
+ #
+ # We want to simulate a master continually getting new
+ # records while an update is going on.
+ #
+ set entries 10
+ eval rep_test $method $envs($master) NULL $entries $niter 0 0 $largs
+ #
+ # We call proc_msgs_once N times to get us into page recovery:
+ # 1. Send master messages and client finds master.
+ # 2. Master replies and client does verify.
+ # 3. Master gives verify_fail and client does update_req.
+ # 4. Master send update info and client does page_req.
+ #
+ # We vary the number of times we call proc_msgs_once (via pmsgs)
+ # so that we test switching master at each point in the
+ # internal initialization processing.
+ #
+ set nproced 0
+ puts "\tRep$tnum.f: Get partially through initialization ($pmsgs iters)"
+ for { set i 1 } { $i < $pmsgs } { incr i } {
+ incr nproced [proc_msgs_once $envlist]
+ }
+
+ if { [string is true $master_change] } {
+ replclear 1
+ replclear 3
+ puts "\tRep$tnum.g: Downgrade/upgrade master."
+
+ # Downgrade the existing master to a client, switch around the
+ # roles, and then upgrade the newly appointed master.
+ error_check_good downgrade [$envs($master) rep_start -client] 0
+
+ set master C
+ set other A
+
+ error_check_good upgrade [$envs($master) rep_start -master] 0
+ }
+
+ # Simulate a client crash: simply abandon the handle without closing it.
+ # Note that this doesn't work on Windows, because there you can't remove
+ # a file if anyone (including yourself) has it open. This also does not
+ # work on HP-UX, because there you are not allowed to open a second
+ # handle on an env.
+ #
+ # Note that crashing only makes sense with "-recover".
+ #
+ if { [string is true $client_crash] } {
+ error_check_good assert [string compare $recargs "-recover"] 0
+
+ set abandoned_env $envs($test_client)
+ set abandoned true
+
+ set envs($test_client) [eval $env_B_cmd $recargs -rep_client]
+ $envs($test_client) rep_limit 0 0
+
+ # Again, remember: whatever the current roles, a site and its EID
+ # always stay linked.
+ #
+ set envlist "{$envs(A) 1} {$envs(B) 2} {$envs(C) 3}"
+ } else {
+ set abandoned false
+ }
+
+ process_msgs $envlist
+ #
+ # Now simulate continual updates to the new master. Each
+ # time through we just process messages once before
+ # generating more updates.
+ #
+ set niter 10
+ for { set i 0 } { $i < $niter } { incr i } {
+ set nproced 0
+ set start [expr $i * $entries]
+ eval rep_test $method $envs($master) NULL $entries $start \
+ $start 0 $largs
+ incr nproced [proc_msgs_once $envlist]
+ error_check_bad nproced $nproced 0
+ }
+ set start [expr $i * $entries]
+ process_msgs $envlist
+
+ puts "\tRep$tnum.h: Verify logs and databases"
+ # Whether or not we've switched roles, it's always site A that may have
+ # had its logs archived away. When the $init_test flag is turned on,
+ # rep_verify allows the site in the second position to have
+ # (more-)archived logs, so we have to abuse the calling signature a bit
+ # here to get this to work. (I.e., even when A is still master and C is
+ # still the other client, we have to pass things in this order so that
+ # the $init_test different-sized-logs trick can work.)
+ #
+ set init_test 1
+ rep_verify $dirs(C) $envs(C) $dirs(A) $envs(A) $init_test
+
+ # Process messages again in case we are running with debug_rop.
+ process_msgs $envlist
+ rep_verify $dirs($master) $envs($master) \
+ $dirs($test_client) $envs($test_client) $init_test
+
+ # Add records to the master and update client.
+ puts "\tRep$tnum.i: Add more records and check again."
+ set entries 10
+ eval rep_test $method $envs($master) NULL $entries $start \
+ $start 0 $largs
+ process_msgs $envlist 0 NONE err
+
+ # Check again that everyone is identical.
+ rep_verify $dirs(C) $envs(C) $dirs(A) $envs(A) $init_test
+ process_msgs $envlist
+ rep_verify $dirs($master) $envs($master) \
+ $dirs($test_client) $envs($test_client) $init_test
+
+ if {$cdb != "NONE"} {
+ if {$abandoned} {
+ # The $cdb was opened in an env which was then
+ # abandoned, recovered, and marked as panicked. We don't
+ # really care; we're just trying to clean up resources.
+ #
+ catch {$cdb close}
+ } else {
+ error_check_good clientdb_close [$cdb close] 0
+ }
+ }
+ error_check_good masterenv_close [$envs($master) close] 0
+ error_check_good clientenv_close [$envs($test_client) close] 0
+ error_check_good clientenv2_close [$envs($other) close] 0
+ if { $abandoned } {
+ catch {$abandoned_env close}
+ }
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep040.tcl b/db-4.8.30/test/rep040.tcl
new file mode 100644
index 0000000..e099cc2
--- /dev/null
+++ b/db-4.8.30/test/rep040.tcl
@@ -0,0 +1,249 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep040
+# TEST Test of racing rep_start and transactions.
+# TEST
+# TEST One master, one client.
+# TEST Have master in the middle of a transaction.
+# TEST Call rep_start to make master a client.
+# TEST Commit the transaction.
+# TEST Call rep_start to make master the master again.
+#
+proc rep040 { method { niter 200 } { tnum "040" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery,
+ # and with and without cleaning. Skip recovery with in-memory
+ # logging - it doesn't make sense.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r $args):\
+ Test of rep_start racing txns $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep040_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep040_sub { method niter tnum logset recargs largs } {
+ source ./include.tcl
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set omethod [convert_method $method]
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
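+ #
+ # Sketch of what the adjust_* helpers are assumed to return
+ # (illustrative values only; see the rep test utilities for the
+ # real ones):
+ #   adjust_logargs in-memory -> "-log_inmemory -log_buffer <bufsize>"
+ #   adjust_logargs on-disk   -> ""
+ #   adjust_txnargs in-memory -> "-txn"
+ #   adjust_txnargs on-disk   -> "-txn nosync"
+ #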
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \
+ -errpfx MASTER $repmemargs \
+ -home $masterdir $verbargs -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ -errpfx CLIENT $repmemargs \
+ -home $clientdir $verbargs -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Set up databases in-memory or on-disk.
+ if { $databases_in_memory } {
+ set testfile { "" "rep040.db" }
+ set testfile1 { "" "rep040A.db" }
+ } else {
+ set testfile "rep040.db"
+ set testfile1 "rep040A.db"
+ }
+
+ set db [eval {berkdb_open_noerr -env $masterenv -auto_commit -create \
+ -mode 0644} $largs $omethod $testfile]
+ error_check_good rep_db [is_valid_db $db] TRUE
+
+ set db1 [eval {berkdb_open_noerr -env $masterenv -auto_commit -create \
+ -mode 0644} $largs $omethod $testfile1]
+ error_check_good rep_db [is_valid_db $db1] TRUE
+
+ set key [expr $niter + 100]
+ set key2 [expr $niter + 200]
+ set data "data1"
+ set newdata "rep040test"
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Run rep_test in the master (and update client).
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ eval rep_test $method $masterenv $db $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ # Get some data on a page
+ set t [$masterenv txn]
+ error_check_good txn [is_valid_txn $t $masterenv] TRUE
+ set ret [$db put -txn $t $key [chop_data $method $data]]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+ process_msgs $envlist
+
+ #
+ # Start 2 txns. One that will commit early and one we'll hold
+ # open a while to test for the warning message.
+ #
+ # Now modify the data but don't commit it yet. This will
+ # update the same page and update the page LSN.
+ #
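+ # (t commits after about 10 seconds; t2 is held open for roughly 90
+ # seconds, so the child's rep_start call must wait for it and, in a
+ # --enable-diagnostic build, prints the "Waiting for op_cnt" message
+ # that step .d checks for.)
+ #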
+ set t [$masterenv txn]
+ error_check_good txn [is_valid_txn $t $masterenv] TRUE
+ set t2 [$masterenv txn]
+ error_check_good txn [is_valid_txn $t2 $masterenv] TRUE
+ set ret [$db put -txn $t $key [chop_data $method $newdata]]
+ error_check_good put $ret 0
+ set ret [$db1 put -txn $t2 $key2 [chop_data $method $newdata]]
+ error_check_good put $ret 0
+ process_msgs $envlist
+
+ # Fork a child process and then sleep for more than 1 minute so
+ # that the child process blocks on the open transaction and
+ # prints out the wait message.
+ #
+ set outfile "$testdir/rep040script.log"
+ puts "\tRep$tnum.b: Fork master child process and sleep 90 seconds"
+ set pid [exec $tclsh_path $test_path/wrap.tcl \
+ rep040script.tcl $outfile $masterdir $databases_in_memory &]
+
+ tclsleep 10
+ process_msgs $envlist
+ error_check_good txn [$t commit] 0
+ tclsleep 80
+
+ error_check_good txn [$t2 commit] 0
+ puts "\tRep$tnum.c: Waiting for child ..."
+ process_msgs $envlist
+ watch_procs $pid 5
+
+ process_msgs $envlist
+
+ set t [$masterenv txn]
+ error_check_good txn [is_valid_txn $t $masterenv] TRUE
+ set ret [$db put -txn $t $key [chop_data $method $data]]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbclose [$db1 close] 0
+ process_msgs $envlist
+
+ # Check that databases are in-memory or on-disk as expected.
+ check_db_location $masterenv $testfile
+ check_db_location $masterenv $testfile1
+ check_db_location $clientenv $testfile
+ check_db_location $clientenv $testfile1
+
+ check_log_location $masterenv
+ check_log_location $clientenv
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+
+ #
+ # Check we detected outstanding txn (t2).
+ # The message we check for is produced only if the build was
+ # configured with --enable-diagnostic.
+ set conf [berkdb getconfig]
+ if { [is_substr $conf "diagnostic"] == 1 } {
+ puts "\tRep$tnum.d: Verify waiting and logs"
+ set ret [catch {open $outfile} ofid]
+ error_check_good open $ret 0
+ set contents [read $ofid]
+ error_check_good \
+ detect [is_substr $contents "Waiting for op_cnt"] 1
+ close $ofid
+ }
+
+ # Check that master and client logs and dbs are identical.
+ set stat [catch {eval exec $util_path/db_printlog \
+ -h $masterdir > $masterdir/prlog} result]
+ error_check_good stat_mprlog $stat 0
+ set stat [catch {eval exec $util_path/db_printlog \
+ -h $clientdir > $clientdir/prlog} result]
+ error_check_good stat_cprlog $stat 0
+ error_check_good log_cmp \
+ [filecmp $masterdir/prlog $clientdir/prlog] 0
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep040script.tcl b/db-4.8.30/test/rep040script.tcl
new file mode 100644
index 0000000..89d1f39
--- /dev/null
+++ b/db-4.8.30/test/rep040script.tcl
@@ -0,0 +1,74 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Rep040 script - transaction concurrency with rep_start
+#
+# Repscript exists to call rep_start. The main script will immediately
+# start a transaction, do an operation, then sleep a long time before
+# commiting the transaction. We should be blocked on the transaction
+# when we call rep_start. The main process should sleep long enough
+# that we get a diagnostic message.
+#
+# Usage: repscript masterdir databases_in_memory
+# masterdir: master env directory
+# databases_in_memory: 1 if using named in-memory databases, 0 otherwise
+#
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+source $test_path/reputils.tcl
+global databases_in_memory
+
+set usage "repscript masterdir databases_in_memory"
+
+# Verify usage
+if { $argc != 2 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set masterdir [ lindex $argv 0 ]
+set databases_in_memory [ lindex $argv 1 ]
+
+puts "databases_in_memory is $databases_in_memory"
+
+# Join the queue env. We assume the rep test convention of
+# placing the messages in $testdir/MSGQUEUEDIR.
+set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR]
+error_check_good script_qenv_open [is_valid_env $queueenv] TRUE
+
+# We need to set up our own machids.
+# Add 1 for master env id, and 2 for the clientenv id.
+repladd 1
+repladd 2
+
+# Join the master env.
+set ma_cmd "berkdb_env_noerr -home $masterdir \
+ -errfile /dev/stderr -errpfx CHILD.MA \
+ -txn -rep_master -rep_transport \[list 1 replsend\]"
+# set ma_cmd "berkdb_env_noerr -home $masterdir \
+# -verbose {rep on} -errfile /dev/stderr -errpfx CHILD.MA \
+# -txn -rep_master -rep_transport \[list 1 replsend\]"
+set masterenv [eval $ma_cmd]
+error_check_good script_menv_open [is_valid_env $masterenv] TRUE
+
+puts "Master open"
+# Downgrade while transaction is open
+error_check_good downgrade [$masterenv rep_start -client] 0
+
+tclsleep 10
+# Upgrade again
+error_check_good upgrade [$masterenv rep_start -master] 0
+#
+# Create a btree database now.
+#
+rep_test btree $masterenv NULL 10 0 0 0
+
+# Close the envs
+puts "Closing Masterenv $masterenv"
+error_check_good script_master_close [$masterenv close] 0
+puts "\tRepscript completed successfully"
diff --git a/db-4.8.30/test/rep041.tcl b/db-4.8.30/test/rep041.tcl
new file mode 100644
index 0000000..3478c77
--- /dev/null
+++ b/db-4.8.30/test/rep041.tcl
@@ -0,0 +1,231 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep041
+# TEST Turn replication on and off at run-time.
+# TEST
+# TEST Start a master with replication OFF (noop transport function).
+# TEST Run rep_test to advance log files and archive.
+# TEST Start up client; change master to working transport function.
+# TEST Now replication is ON.
+# TEST Do more ops, make sure client is up to date.
+# TEST Close client, turn replication OFF on master, do more ops.
+# TEST Repeat from point A.
+#
+proc rep041 { method { niter 500 } { tnum "041" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set saved_args $args
+
+ set logsets [create_logsets 2]
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery,
+ # and with and without cleaning. Skip recovery with in-memory
+ # logging - it doesn't make sense.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+
+ set envargs ""
+ set args $saved_args
+ puts "Rep$tnum ($method $envargs $r $args):\
+ Turn replication on and off, $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep041_sub $method $niter $tnum $envargs \
+ $l $r $args
+ }
+ }
+}
+
+proc rep041_sub { method niter tnum envargs logset recargs largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one log file.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
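+ # With a 4096-byte pagesize, log_max is 8 * 4096 = 32768 bytes, so
+ # under the 4x rule above the in-memory log buffer can be at most
+ # 8192 bytes for this log file size.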
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ puts "\tRep$tnum.a: Open master with replication OFF."
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $verbargs \
+ $m_logargs -log_max $log_max $envargs -errpfx MASTER \
+ $repmemargs -home $masterdir -rep"
+ set masterenv [eval $ma_envcmd $recargs]
+ $masterenv rep_limit 0 0
+
+ # Run rep_test in the master to advance log files.
+ puts "\tRep$tnum.b: Running rep_test to create some log files."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+
+ # Reset transport function to replnoop, and specify that
+ # this env will be master.
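+ # (replnoop is presumably a no-op transport callback that simply
+ # discards outgoing messages, so while it is installed no replication
+ # traffic is queued for delivery.)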
+ error_check_good \
+ transport_noop [$masterenv rep_transport {1 replnoop}] 0
+ error_check_good rep_on [$masterenv rep_start -master] 0
+
+ # If master is on-disk, archive.
+ if { $m_logtype != "in-memory" } {
+ puts "\tRep$tnum.c: Run log_archive - some logs should be removed."
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ error_check_bad log.1.present [lsearch -exact $res log.0000000001] -1
+ set res [eval exec $util_path/db_archive -d -h $masterdir]
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ error_check_good log.1.gone [lsearch -exact $res log.0000000001] -1
+ }
+
+ # Run rep_test some more - this simulates running without clients.
+ puts "\tRep$tnum.d: Running rep_test."
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+
+ # Open a client
+ puts "\tRep$tnum.e: Open client."
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $verbargs \
+ $c_logargs -log_max $log_max $envargs -errpfx CLIENT \
+ $repmemargs -home $clientdir \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ $clientenv rep_limit 0 0
+ $clientenv rep_request 4000 128000
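+ # (rep_request is assumed here to set the client's minimum and
+ # maximum retransmission request thresholds.)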
+
+ # Set up envlist for processing messages later.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+
+ # Turn replication on and off more than once.
+ set repeats 2
+ for { set i 0 } { $i < $repeats } { incr i } {
+
+ puts "\tRep$tnum.f.$i: Turn replication ON."
+ # Reset master transport function to replsend.
+ error_check_good transport_on \
+ [$masterenv rep_transport {1 replsend}] 0
+
+ # Have the master announce itself so messages will pass.
+ error_check_good rep_on [$masterenv rep_start -master] 0
+
+ # Create some new messages, and process them.
+ set nentries 50
+ eval rep_test \
+ $method $masterenv NULL $nentries $start $start 0 $largs
+ incr start $nentries
+ process_msgs $envlist
+
+ puts "\tRep$tnum.g.$i: Verify that client is up to date."
+
+ # Check that master and client contents match, to verify
+ # that client is up to date.
+ rep_verify $masterdir $masterenv $clientdir $clientenv 0 1 0
+
+ # Process messages again -- the rep_verify created some.
+ process_msgs $envlist
+
+ puts "\tRep$tnum.h.$i: Turn replication OFF on master."
+ error_check_good \
+ transport_off [$masterenv rep_transport {1 replnoop}] 0
+
+ puts "\tRep$tnum.i.$i: Running rep_test in replicated env."
+ eval rep_test \
+ $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+
+ puts "\tRep$tnum.j.$i:\
+ Process messages; none should be available."
+ set nproced [proc_msgs_once $envlist NONE err]
+ error_check_good no_messages $nproced 0
+
+ # Client and master should NOT match.
+ puts "\tRep$tnum.k.$i: Master and client should NOT match."
+ rep_verify $masterdir $masterenv $clientdir $clientenv 0 0 0
+
+ }
+
+ error_check_good clientenv_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep042.tcl b/db-4.8.30/test/rep042.tcl
new file mode 100644
index 0000000..50bddd1
--- /dev/null
+++ b/db-4.8.30/test/rep042.tcl
@@ -0,0 +1,202 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep042
+# TEST Concurrency with updates.
+# TEST
+# TEST Verify racing role changes and updates don't result in
+# TEST pages with LSN 0,1. Set up an environment that is master.
+# TEST Spawn a child process that does a delete, using the
+# TEST $env test check hook so that it sleeps in the middle of the call.
+# TEST Master downgrades and then sleeps as a client so that
+# TEST child will run. Verify child does not succeed (should
+# TEST get read-only error) due to role change in the middle of
+# TEST its call.
+proc rep042 { method { niter 10 } { tnum "042" } args } {
+
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+
+ puts "Rep$tnum ($method $r):\
+ Concurrency with updates $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep042_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep042_sub { method niter tnum logset recargs largs } {
+ source ./include.tcl
+ global perm_response_list
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set omethod [convert_method $method]
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_cmd "berkdb_env_noerr -create $repmemargs \
+ -log_max 1000000 $m_txnargs $m_logargs $verbargs \
+ -home $masterdir -rep_master -errpfx MASTER \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_cmd $recargs]
+
+ # Open a client
+ repladd 2
+ set cl_cmd "berkdb_env_noerr -create -home $clientdir $repmemargs \
+ $c_txnargs $c_logargs $verbargs -errpfx CLIENT -rep_client \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_cmd $recargs]
+
+ # Bring the client online.
+ process_msgs "{$masterenv 1} {$clientenv 2}"
+
+ puts "\tRep$tnum.a: Create and populate database."
+ set dbname rep042.db
+ set db [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $masterenv $largs $dbname"]
+ for { set i 1 } { $i < $niter } { incr i } {
+ set t [$masterenv txn]
+ error_check_good db_put \
+ [eval $db put -txn $t $i [chop_data $method data$i]] 0
+ error_check_good txn_commit [$t commit] 0
+ }
+ process_msgs "{$masterenv 1} {$clientenv 2}"
+
+ set ops {del truncate}
+ foreach op $ops {
+ # Fork a child process that joins the master env. The child
+ # will do a delete or truncate.
+ set sleepval 4
+ set scrlog $testdir/repscript.log
+ puts "\tRep$tnum.b: Fork child process on client ($op)."
+ set pid [exec $tclsh_path $test_path/wrap.tcl \
+ rep042script.tcl $scrlog \
+ $masterdir $sleepval $dbname $op &]
+
+ # Wait for child process to start up.
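+ # (The child creates $masterdir/marker.db right before issuing its
+ # delete or truncate, so the file's appearance tells us the child is
+ # about to enter the call.)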
+ while { 1 } {
+ if { [file exists $masterdir/marker.db] == 0 } {
+ tclsleep 1
+ } else {
+ tclsleep 1
+ break
+ }
+ }
+
+ puts "\tRep$tnum.c: Downgrade during child $op."
+ error_check_good downgrade [$masterenv rep_start -client] 0
+
+ puts "\tRep$tnum.d: Waiting for child ..."
+ # Watch until the child is done.
+ watch_procs $pid 5
+ puts "\tRep$tnum.e: Upgrade to master again ..."
+ error_check_good upgrade [$masterenv rep_start -master] 0
+ set end [expr $niter * 2]
+ for { set i $niter } { $i <= $end } { incr i } {
+ set t [$masterenv txn]
+ error_check_good db_put \
+ [eval $db put -txn $t $i [chop_data $method data$i]] 0
+ error_check_good txn_commit [$t commit] 0
+ }
+ process_msgs "{$masterenv 1} {$clientenv 2}"
+
+ # We expect to find the error "attempt to modify a read-only
+ # database." If we don't, report what we did find as a failure.
+ set readonly_error [check_script $scrlog "read-only"]
+ if { $readonly_error != 1 } {
+ set errstrings [eval findfail $scrlog]
+ if { [llength $errstrings] > 0 } {
+ puts "FAIL: unexpected error(s)\
+ found in file $scrlog:$errstrings"
+ }
+ }
+ fileremove -f $masterdir/marker.db
+ }
+
+ # Clean up.
+ error_check_good db_close [$db close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc check_script { log str } {
+ set ret 0
+ set res [catch {open $log} id]
+ if { $res != 0 } {
+ puts "FAIL: open of $log failed: $id"
+ # Return 0
+ return $ret
+ }
+ while { [gets $id val] != -1 } {
+# puts "line: $val"
+ if { [is_substr $val $str] } {
+ set ret 1
+ break
+ }
+ }
+ close $id
+ return $ret
+}
diff --git a/db-4.8.30/test/rep042script.tcl b/db-4.8.30/test/rep042script.tcl
new file mode 100644
index 0000000..7a7e341
--- /dev/null
+++ b/db-4.8.30/test/rep042script.tcl
@@ -0,0 +1,78 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Rep042 script - concurrency with updates.
+
+# Usage: repscript masterdir sleepval dbname op
+# masterdir: master env directory
+# sleepval: sleep value (in secs) to send to env test_check
+# dbname: name of database to use
+# op: operation: one of del or truncate
+#
+source ./include.tcl
+source $test_path/reputils.tcl
+
+set usage "repscript masterdir sleepval dbname op"
+
+# Verify usage
+if { $argc != 4 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set masterdir [ lindex $argv 0 ]
+set sleepval [ lindex $argv 1 ]
+set dbname [lindex $argv 2]
+set op [lindex $argv 3]
+
+# Join the queue env. We assume the rep test convention of
+# placing the messages in $testdir/MSGQUEUEDIR.
+set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR]
+error_check_good script_qenv_open [is_valid_env $queueenv] TRUE
+
+# We need to set up our own machids.
+# Add 1 for master env id, and 2 for the clientenv id.
+#
+repladd 1
+repladd 2
+
+# Join the master env.
+set ma_cmd "berkdb_env_noerr -home $masterdir \
+ -txn -rep_master -rep_transport \[list 1 replsend\]"
+# set ma_cmd "berkdb_env_noerr -home $masterdir \
+# -verbose {rep on} -errfile /dev/stderr \
+# -txn -rep_master -rep_transport \[list 1 replsend\]"
+set masterenv [eval $ma_cmd]
+error_check_good script_menv_open [is_valid_env $masterenv] TRUE
+
+puts "Master open"
+set db [eval "berkdb_open -auto_commit -env $masterenv $dbname"]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+# Make it so that the process sleeps in the middle of the delete or truncate.
+$masterenv test check $sleepval
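+# ("test check" is assumed to be a diagnostic hook that makes the next
+# operation pause for $sleepval seconds partway through, giving the
+# parent time to downgrade the master while this call is in progress.)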
+
+# Create marker file
+set marker [open $masterdir/marker.db w]
+close $marker
+
+if { $op == "del" } {
+ # Just delete record 1 - we know that one is in there.
+ set stat [catch {$db del 1} ret]
+ puts "Stat: $stat"
+ puts "Ret: $ret"
+} elseif { $op == "truncate" } {
+ set stat [catch {$db truncate} ret]
+ puts "Stat: $stat"
+ puts "Ret: $ret"
+} else {
+ puts "Stat: FAIL: invalid operation specified"
+}
+# Close the envs
+error_check_good script_db_close [$db close] 0
+error_check_good script_master_close [$masterenv close] 0
+puts "\tRepscript completed successfully"
diff --git a/db-4.8.30/test/rep043.tcl b/db-4.8.30/test/rep043.tcl
new file mode 100644
index 0000000..fed5bc1
--- /dev/null
+++ b/db-4.8.30/test/rep043.tcl
@@ -0,0 +1,246 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep043
+# TEST
+# TEST Constant writes during upgrade/downgrade.
+# TEST
+# TEST Three envs take turns being master. Each env
+# TEST has a child process that writes continuously.
+# TEST The writes succeed while that env is master
+# TEST and fail while it is not.
+
+proc rep043 { method { rotations 25 } { tnum "043" } args } {
+
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Skip for record-based methods.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_record_based $method] != 1 } {
+ lappend test_methods $method
+ }
+ }
+ return $test_methods
+ }
+ if { [is_record_based $method] == 1 } {
+ puts "Skipping rep$tnum for record-based methods."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 3]
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r): Constant writes with \
+ rotating master $rotations times $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client 0 logs are [lindex $l 1]"
+ puts "Rep$tnum: Client 1 logs are [lindex $l 2]"
+ rep043_sub $method $rotations $tnum $l $r $args
+ }
+ }
+}
+
+proc rep043_sub { method rotations tnum logset recargs largs } {
+ source ./include.tcl
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set orig_tdir $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/ENV0
+ set clientdir $testdir/ENV1
+ set clientdir2 $testdir/ENV2
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set c2_logtype [lindex $logset 2]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+
+ set niter 200
+ set testfile rep043.db
+ set omethod [convert_method $method]
+
+ # Since we're constantly switching masters in this test, run
+ # each env with a different cache size, just to verify that the
+ # cache size does not have to match across sites.
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $repmemargs \
+ $m_logargs -errpfx ENV0 -errfile /dev/stderr $verbargs \
+ -cachesize {0 4194304 3} -lock_detect default \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set env0 [eval $ma_envcmd $recargs -rep_master]
+
+ # Open two clients
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $repmemargs \
+ $c_logargs -errpfx ENV1 -errfile /dev/stderr $verbargs \
+ -cachesize {0 2097152 2} -lock_detect default \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set env1 [eval $cl_envcmd $recargs -rep_client]
+
+ repladd 3
+ set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs $repmemargs \
+ $c2_logargs -errpfx ENV2 -errfile /dev/stderr $verbargs \
+ -cachesize {0 1048576 1} -lock_detect default \
+ -home $clientdir2 -rep_transport \[list 3 replsend\]"
+ set env2 [eval $cl2_envcmd $recargs -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$env0 1} {$env1 2} {$env2 3}"
+ process_msgs $envlist
+
+ # Set up marker file.
+ set markerenv [berkdb_env -create -home $testdir -txn]
+ error_check_good marker_open [is_valid_env $markerenv] TRUE
+ set marker [eval "berkdb_open \
+ -create -btree -auto_commit -env $markerenv marker.db"]
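+
+ # The marker database is the parent/child communication channel:
+ # the parent writes ITER with the current rotation number and DONE
+ # when the test is finished; each child polls those keys (see
+ # rep043script.tcl) to tag its writes and to know when to exit.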
+
+ # Start the 3 child processes: one for each env.
+ set pids {}
+ set dirlist "0 $masterdir 1 $clientdir 2 $clientdir2"
+ foreach { writer dir } $dirlist {
+ puts "\tRep$tnum.a: Fork child process WRITER$writer."
+ set pid [exec $tclsh_path $test_path/wrap.tcl \
+ rep043script.tcl $testdir/rep043script.log.$writer \
+ $dir $writer &]
+ lappend pids $pid
+ }
+
+ # For the first iteration, masterenv is $env0.
+ set masterenv $env0
+ set curdir $masterdir
+
+ # Write $niter entries to master, then rotate.
+ for { set i 0 } { $i < $rotations } { incr i } {
+
+ # Identify current master, determine next master
+ if { $masterenv == $env0 } {
+ set nextmasterenv $env1
+ set nextdir $clientdir
+ } elseif { $masterenv == $env1 } {
+ set nextmasterenv $env2
+ set nextdir $clientdir2
+ } elseif { $masterenv == $env2 } {
+ set nextmasterenv $env0
+ set nextdir $masterdir
+ } else {
+ puts "FAIL: could not identify current master"
+ return
+ }
+
+ puts "\tRep$tnum.b.$i: Open master db in $curdir."
+ set mdb [eval {berkdb_open_noerr} -env $masterenv -auto_commit \
+ -mode 0644 $omethod -create $testfile]
+ error_check_good dbopen [is_valid_db $mdb] TRUE
+ error_check_good marker_iter [$marker put ITER $i] 0
+
+ puts "\t\tRep$tnum.c.$i: Put data to master."
+ for { set j 0 } { $j < $niter } { incr j } {
+ set key KEY.$i.$j
+ set data DATA
+ set t [$masterenv txn]
+ set stat [catch \
+ {eval {$mdb put} -txn $t $key $data} ret]
+ if { $ret == 0 } {
+ error_check_good commit [$t commit] 0
+ } else {
+ error_check_good commit [$t abort] 0
+ }
+ }
+ error_check_good mdb_close [$mdb close] 0
+
+ # Checkpoint.
+ error_check_good checkpoint [$masterenv txn_checkpoint] 0
+
+ process_msgs $envlist
+
+ puts "\t\tRep$tnum.d.$i: Downgrade current master."
+ error_check_good downgrade [$masterenv rep_start -client] 0
+
+ puts "\t\tRep$tnum.e.$i: Upgrade next master $nextdir."
+ error_check_good upgrade [$nextmasterenv rep_start -master] 0
+ set masterenv $nextmasterenv
+ set curdir $nextdir
+
+ process_msgs $envlist
+ }
+
+ puts "\tRep$tnum.f: Clean up."
+ # Tell the child processes we are done.
+ error_check_good marker_done [$marker put DONE DONE] 0
+ error_check_good marker_close [$marker close] 0
+ error_check_good markerenv_close [$markerenv close] 0
+
+ error_check_good env0_close [$env0 close] 0
+ error_check_good env1_close [$env1 close] 0
+ error_check_good env2_close [$env2 close] 0
+
+ # Make sure the child processes are done.
+ watch_procs $pids 1
+
+ # Check log files for failures.
+ for { set n 0 } { $n < 3 } { incr n } {
+ set file rep043script.log.$n
+ set errstrings [eval findfail $testdir/$file]
+ foreach str $errstrings {
+ puts "FAIL: error message in file $file: $str"
+ }
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+ set testdir $orig_tdir
+ return
+}
diff --git a/db-4.8.30/test/rep043script.tcl b/db-4.8.30/test/rep043script.tcl
new file mode 100644
index 0000000..df27843
--- /dev/null
+++ b/db-4.8.30/test/rep043script.tcl
@@ -0,0 +1,125 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Rep043 script - constant writes to an env which may be
+# either a master or a client, or changing between the
+# two states
+#
+# Usage: rep043script dir writerid
+# dir: Directory of writer
+# writerid: ID number for the writer
+
+set usage "rep043script dir writerid"
+
+# Verify usage
+if { $argc != 2 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [ lindex $argv 0 ]
+set writerid [ lindex $argv 1 ]
+set nentries 50
+
+# Join the queue env. We assume the rep test convention of
+# placing the messages in $testdir/MSGQUEUEDIR.
+set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR]
+error_check_good script_qenv_open [is_valid_env $queueenv] TRUE
+
+# We need to set up our own machids.
+set envid [expr $writerid + 1]
+repladd $envid
+set name "WRITER.$writerid"
+
+# Pause a bit to give the master a chance to create the database
+# before we try to join.
+tclsleep 3
+
+# Join the env.
+set env_cmd "berkdb_env_noerr -home $dir -lock_detect default \
+ -errfile /dev/stderr -errpfx WRITER.$writerid \
+ -txn -rep_transport \[list $envid replsend\]"
+# set env_cmd "berkdb_env_noerr -home $dir \
+# -errfile /dev/stderr -errpfx WRITER.$writerid \
+# -verbose {rep on} -lock_detect default \
+# -txn -rep_transport \[list $envid replsend\]"
+set dbenv [eval $env_cmd]
+error_check_good script_env_open [is_valid_env $dbenv] TRUE
+
+# Open database. It's still possible under heavy load that the
+# master hasn't created the database, so pause even longer if it's
+# not there.
+set testfile "rep043.db"
+while {[catch {berkdb_open_noerr -errpfx $name -errfile /dev/stderr\
+ -env $dbenv -auto_commit $testfile} db]} {
+ puts "Could not open handle $db, sleeping 1 second."
+ tclsleep 1
+}
+error_check_good dbopen [is_valid_db $db] TRUE
+
+# Communicate with parent in marker file.
+set markerenv [berkdb_env -home $testdir -txn]
+error_check_good markerenv_open [is_valid_env $markerenv] TRUE
+set marker [eval "berkdb_open \
+ -create -btree -auto_commit -env $markerenv marker.db"]
+
+# Write records to the database.
+set iter INIT
+set olditer $iter
+while { [llength [$marker get DONE]] == 0 } {
+ for { set i 0 } { $i < $nentries } { incr i } {
+ set kd [$marker get ITER]
+ if { [llength $kd] == 0 } {
+ set iter X
+ } else {
+ set iter [lindex [lindex $kd 0] 1]
+ }
+ if { $iter != $olditer } {
+ puts "Entry $i: Iter changed from $olditer to $iter"
+ set olditer $iter
+ }
+
+ set key WRITER.$writerid.$iter.$i
+ set str string.$i
+
+ set t [$dbenv txn]
+ error_check_good txn [is_valid_txn $t $dbenv] TRUE
+ set stat [catch {$db put -txn $t $key $str} res]
+ if { $stat == 0 } {
+puts "res is $res, commit"
+ error_check_good txn_commit [$t commit] 0
+ } else {
+puts "res is $res, abort"
+ error_check_good txn_abort [$t abort] 0
+ }
+
+ # If the handle is dead, get a new one.
+ if { [is_substr $res DB_REP_HANDLE_DEAD] == 1 } {
+puts "Close - dead handle."
+ error_check_good db_close [$db close] 0
+puts "Getting new handle"
+ while {[catch {berkdb_open_noerr -env $dbenv\
+ -auto_commit $testfile} db]} {
+ puts "Could not open handle: $db"
+ tclsleep 1
+ }
+ error_check_good db_open [is_valid_db $db] TRUE
+ }
+
+ if { [expr $i % 10] == 1 } {
+ puts "Wrote WRITER.$writerid.$iter.$i record $i"
+ }
+ }
+ tclsleep 1
+}
+
+# Clean up.
+error_check_good db_close [$db close] 0
+error_check_good dbenv_close [$dbenv close] 0
+replclose $testdir/MSGQUEUEDIR
+error_check_good marker_close [$marker close] 0
+error_check_good markerenv_close [$markerenv close] 0
diff --git a/db-4.8.30/test/rep044.tcl b/db-4.8.30/test/rep044.tcl
new file mode 100644
index 0000000..c7b1c5d
--- /dev/null
+++ b/db-4.8.30/test/rep044.tcl
@@ -0,0 +1,287 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep044
+# TEST
+# TEST Test rollbacks with open file ids.
+# TEST
+# TEST We have one master with two handles and one client.
+# TEST Each time through the main loop, we open a db, write
+# TEST to the db, and close the db. Each one of these actions
+# TEST is propagated to the client, or a rollback is forced
+# TEST by swapping masters.
+
+proc rep044 { method { tnum "044" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # HP-UX can't open two handles on the same env, so it
+ # can't run this test.
+ if { $is_hp_test == 1 } {
+ puts "Skipping rep$tnum for HP-UX."
+ return
+ }
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ puts "Rep$tnum ($method): Replication with rollbacks\
+ and open file ids $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client 0 logs are [lindex $l 1]"
+ rep044_sub $method $tnum $l $args
+ }
+}
+
+proc rep044_sub { method tnum logset largs } {
+ source ./include.tcl
+ set orig_tdir $testdir
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set masterdir $testdir/ENV0
+ set clientdir $testdir/ENV1
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ set niter 20
+ set omethod [convert_method $method]
+
+ # The main loop runs all the permutations of processing/not
+ # processing the database open to the clients; processing/not
+ # processing the database writes to the clients; and processing/
+ # not processing the database close to the clients. Set up the
+ # options in advance so the loop is not heavily indented.
+ #
+ # Each entry displays { open write close }.
+ # For example { 1 1 0 } means we process messages after the
+ # db open and the db writes but not after the db close.
+
+ set optionsets {
+ {1 1 1}
+ {1 1 0}
+ {1 0 1}
+ {1 0 0}
+ {0 1 1}
+ {0 1 0}
+ {0 0 1}
+ {0 0 0}
+ }
+
+ # Main loop.
+ foreach set $optionsets {
+
+ env_cleanup $testdir
+ replsetup $testdir/MSGQUEUEDIR
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set processopens [lindex $set 0]
+ set processwrites [lindex $set 1]
+ set processcloses [lindex $set 2]
+
+ set notdoing {}
+ if { $processopens == 0 } {
+ append notdoing " OPENS"
+ }
+ if { $processwrites == 0 } {
+ append notdoing " WRITES"
+ }
+ if { $processcloses == 0 } {
+ append notdoing " CLOSES"
+ }
+ if { $notdoing != {} } {
+ puts "Rep$tnum:\
+ Loop with $notdoing not processed to client."
+ }
+
+ # Open a master.
+ repladd 1
+ set envcmd(M0) "berkdb_env_noerr -create $m_txnargs \
+ $m_logargs -lock_detect default $repmemargs \
+ -errpfx ENV.M0 $verbargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set menv0 [eval $envcmd(M0) -rep_master]
+
+ # Open second handle on master env.
+ set envcmd(M1) "berkdb_env_noerr $m_txnargs \
+ $m_logargs -lock_detect default $repmemargs \
+ -errpfx ENV.M1 $verbargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set menv1 [eval $envcmd(M1)]
+ error_check_good rep_start [$menv1 rep_start -master] 0
+
+ # Open a client
+ repladd 2
+ set envcmd(C) "berkdb_env_noerr -create $c_txnargs \
+ $c_logargs -errpfx ENV.C $verbargs $repmemargs \
+ -lock_detect default \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set cenv [eval $envcmd(C) -rep_client]
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$menv0 1} {$cenv 2}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.a: Run rep_test in 1st master env."
+ set start 0
+ eval rep_test $method $menv0 NULL $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Open db in 2nd master env."
+ # Open the db here; we want it to remain open after rep_test.
+
+ # Set up database as in-memory or on-disk.
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+
+ set db1 [eval {berkdb_open_noerr -env $menv1 -auto_commit \
+ -mode 0644} $largs $omethod $dbname]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ if { $processopens == 1 } {
+ puts "\tRep$tnum.b1:\
+ Process db open messages to client."
+ process_msgs $envlist
+ } else {
+ set start [do_switch $method $niter $start $menv0 $cenv $largs]
+ }
+
+ puts "\tRep$tnum.c: Write to database in 2nd master."
+ # We don't use rep_test here, because sometimes we abort.
+ for { set i 1 } { $i <= $niter } { incr i } {
+ set t [$menv1 txn]
+ set key $i
+ set str STRING.$i
+ if [catch {eval {$db1 put}\
+ -txn $t {$key [chop_data $method $str]}} result] {
+ # If handle is dead, abort txn, then
+ # close and reopen db.
+ error_check_good handle_dead \
+ [is_substr $result HANDLE_DEAD] 1
+ error_check_good txn_abort [$t abort] 0
+ error_check_good close_handle [$db1 close] 0
+ set db1 [eval {berkdb_open_noerr \
+ -env $menv1 -auto_commit -mode 0644}\
+ $largs $omethod $dbname]
+ } else {
+ error_check_good txn_commit [$t commit] 0
+ }
+ }
+
+ if { $processwrites == 1 } {
+ puts "\tRep$tnum.c1:\
+ Process db put messages to client."
+ process_msgs $envlist
+ } else {
+ set start [do_switch $method $niter $start $menv0 $cenv $largs]
+ }
+
+ puts "\tRep$tnum.d: Close database using 2nd master env handle."
+ error_check_good db_close [$db1 close] 0
+
+ if { $processcloses == 1 } {
+ puts "\tRep$tnum.d1:\
+ Process db close messages to client."
+ process_msgs $envlist
+ } else {
+ set start [do_switch $method $niter $start $menv0 $cenv $largs]
+ }
+
+ # Check that databases are in-memory or on-disk as expected.
+ check_db_location $menv0
+ check_db_location $menv1
+ check_db_location $cenv
+
+ puts "\tRep$tnum.e: Clean up."
+ error_check_good menv0_close [$menv0 close] 0
+ error_check_good menv1_close [$menv1 close] 0
+ error_check_good cenv_close [$cenv close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+ }
+ set testdir $orig_tdir
+ return
+}
+
+proc do_switch { method niter start masterenv clientenv largs } {
+ set envlist "{$masterenv 1} {$clientenv 2}"
+
+ # Downgrade master, upgrade client.
+ error_check_good master_downgrade [$masterenv rep_start -client] 0
+ error_check_good client_upgrade [$clientenv rep_start -master] 0
+ process_msgs $envlist
+
+ # Run rep_test in the new master.
+ eval rep_test $method $clientenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+
+ # Downgrade newmaster, upgrade original master.
+ error_check_good client_downgrade [$clientenv rep_start -client] 0
+ error_check_good master_upgrade [$masterenv rep_start -master] 0
+
+ # Run rep_test in the restored master.
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+
+ return $start
+}
diff --git a/db-4.8.30/test/rep045.tcl b/db-4.8.30/test/rep045.tcl
new file mode 100644
index 0000000..ba406e9
--- /dev/null
+++ b/db-4.8.30/test/rep045.tcl
@@ -0,0 +1,286 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep045
+# TEST
+# TEST Replication with versions.
+# TEST
+# TEST Mimic an application where a database is set up in the
+# TEST background and then put into a replication group for use.
+# TEST The "version database" identifies the current live
+# TEST version, the database against which queries are made.
+# TEST For example, the version database might say the current
+# TEST version is 3, and queries would then be sent to db.3.
+# TEST Version 4 is prepared for use while version 3 is in use.
+# TEST When version 4 is complete, the version database is updated
+# TEST to point to version 4 so queries can be directed there.
+# TEST
+# TEST This test has a master and two clients. One client swaps
+# TEST roles with the master, and the other client runs constantly
+# TEST in another process.
+
+proc rep045 { method { tnum "045" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 3]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ puts "Rep$tnum ($method): Replication with version\
+ databases $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client 0 logs are [lindex $l 1]"
+ puts "Rep$tnum: Client 1 logs are [lindex $l 2]"
+ rep045_sub $method $tnum $l $args
+ }
+}
+
+proc rep045_sub { method tnum logset largs } {
+ source ./include.tcl
+ set orig_tdir $testdir
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir0 $testdir/CLIENTDIR0
+ set clientdir1 $testdir/CLIENTDIR1
+
+ env_cleanup $testdir
+ replsetup $testdir/MSGQUEUEDIR
+ file mkdir $masterdir
+ file mkdir $clientdir0
+ file mkdir $clientdir1
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set c2_logtype [lindex $logset 2]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+
+ set omethod [convert_method $method]
+
+ # Open a master.
+ repladd 1
+ set envcmd(M0) "berkdb_env_noerr -create $m_txnargs \
+ $m_logargs -errpfx ENV.M0 $verbargs $repmemargs \
+ -errfile /dev/stderr -lock_detect default \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set menv [eval $envcmd(M0) -rep_master]
+
+ # Open a client
+ repladd 2
+ set envcmd(C0) "berkdb_env_noerr -create $c_txnargs \
+ $c_logargs -errpfx ENV.C0 $verbargs $repmemargs \
+ -errfile /dev/stderr -lock_detect default \
+ -home $clientdir0 -rep_transport \[list 2 replsend\]"
+ set cenv0 [eval $envcmd(C0) -rep_client]
+
+ # Open second client.
+ repladd 3
+ set envcmd(C1) "berkdb_env_noerr -create $c2_txnargs \
+ $c2_logargs -errpfx ENV.C1 $verbargs $repmemargs \
+ -errfile /dev/stderr -lock_detect default \
+ -home $clientdir1 -rep_transport \[list 3 replsend\]"
+ set cenv1 [eval $envcmd(C1) -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$menv 1} {$cenv0 2} {$cenv1 3}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # db_remove in a moment.
+ #
+ $menv test force noarchive_timeout
+
+ puts "\tRep$tnum.a: Initialize version database."
+ # Set up variables so we cycle through version numbers 1
+ # through maxversion several times.
+ if { $databases_in_memory } {
+ set vname { "" "version.db" }
+ } else {
+ set vname "version.db"
+ }
+ set version 0
+ set maxversion 5
+ set iter 12
+ set nentries 100
+ set start 0
+
+ # The version db is always btree.
+ set vdb [eval {berkdb_open_noerr -env $menv -create \
+ -auto_commit -mode 0644} -btree $vname]
+ error_check_good init_version [$vdb put VERSION $version] 0
+ error_check_good vdb_close [$vdb close] 0
+ process_msgs $envlist
+
+ # Start up a separate process that constantly reads data
+ # from the current official version.
+ puts "\tRep$tnum.b: Spawn a child tclsh to do client work."
+ set pid [exec $tclsh_path $test_path/wrap.tcl \
+ rep045script.tcl $testdir/rep045script.log \
+ $clientdir1 $vname $databases_in_memory &]
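+
+ # The child is expected to follow the version-db pattern described
+ # in the TEST comment: look up VERSION in $vname and query db.N,
+ # along the lines of this hypothetical sketch:
+ #   set v  [lindex [lindex [$vdb get VERSION] 0] 1]
+ #   set db [berkdb_open -env $env -rdonly "db.$v"]
+ # See rep045script.tcl for the actual loop.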
+
+ # Main loop: update query database, process messages (or don't,
+ # simulating a failure), announce the new version, process
+ # messages (or don't), and swap masters.
+ set version 1
+ for { set i 1 } { $i < $iter } { incr i } {
+
+ # If db.$version exists on disk, clean it up.
+ if { $databases_in_memory } {
+ set dbname { "" "db.$version" }
+ } else {
+ set dbname "db.$version"
+ }
+ if { [file exists $masterdir/$dbname] == 1 } {
+ puts "\tRep$tnum.c.$i: Removing old version $version."
+ error_check_good dbremove \
+ [$menv dbremove -auto_commit $dbname] 0
+ }
+
+ puts "\tRep$tnum.c.$i: Set up query database $version."
+ set db [eval berkdb_open_noerr -create -env $menv\
+ -auto_commit -mode 0644 $largs $omethod $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+ eval rep_test $method $menv $db $nentries $start $start 0 $largs
+ incr start $nentries
+ error_check_good db_close [$db close] 0
+
+ # We alternate between processing the messages and
+ # clearing the messages to simulate a failure.
+
+ set process [expr $i % 2]
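+		# Odd-numbered iterations deliver the queued messages;
+		# even-numbered iterations drop them (replclear) so the
+		# clients fall behind as if traffic had been lost.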
+ if { $process == 1 } {
+ process_msgs $envlist
+ } else {
+ replclear 2
+ replclear 3
+ }
+
+ # Announce new version.
+ puts "\tRep$tnum.d.$i: Announce new version $version."
+ set vdb [eval {berkdb_open_noerr -env $menv \
+ -auto_commit -mode 0644} $vname]
+ error_check_good update_version [$vdb put VERSION $version] 0
+ error_check_good vdb_close [$vdb close] 0
+
+ # Process messages or simulate failure.
+ if { $process == 1 } {
+ process_msgs $envlist
+ } else {
+ replclear 2
+ replclear 3
+ }
+
+ # Switch master, update envlist.
+ puts "\tRep$tnum.e.$i: Switch masters."
+ set envlist [switch_master $envlist]
+
+ # Update values for next iteration.
+ set menv [lindex [lindex $envlist 0] 0]
+ set cenv0 [lindex [lindex $envlist 1] 0]
+ incr version
+ if { $version > $maxversion } {
+ set version 1
+ }
+ }
+
+ # Signal to child that we are done.
+ set vdb [eval {berkdb_open_noerr -env $menv \
+ -auto_commit -mode 0644} $vname]
+ error_check_good version_done [$vdb put VERSION DONE] 0
+ error_check_good vdb_close [$vdb close] 0
+ process_msgs $envlist
+
+ # Watch for child to finish.
+ watch_procs $pid 5
+
+ puts "\tRep$tnum.f: Clean up."
+ error_check_good menv_close [$menv close] 0
+ error_check_good cenv0_close [$cenv0 close] 0
+ error_check_good cenv1_close [$cenv1 close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+
+ # Check for failures in child's log file.
+ set errstrings [eval findfail $testdir/rep045script.log]
+ foreach str $errstrings {
+ puts "FAIL: error message in log file: $str"
+ }
+
+ set testdir $orig_tdir
+ return
+}
+
+proc switch_master { envlist } {
+ # Find env handles and machine ids.
+ set menv [lindex [lindex $envlist 0] 0]
+ set mid [lindex [lindex $envlist 0] 1]
+ set cenv [lindex [lindex $envlist 1] 0]
+ set cid [lindex [lindex $envlist 1] 1]
+ set cenv1 [lindex [lindex $envlist 2] 0]
+ set cid1 [lindex [lindex $envlist 2] 1]
+
+ # Downgrade master, upgrade client.
+ error_check_good master_downgrade [$menv rep_start -client] 0
+ error_check_good client_upgrade [$cenv rep_start -master] 0
+ process_msgs $envlist
+
+ # Adjust envlist. The former client env is the new master,
+ # and vice versa.
+ set newenvlist "{$cenv $cid} {$menv $mid} {$cenv1 $cid1}"
+ return $newenvlist
+}
diff --git a/db-4.8.30/test/rep045script.tcl b/db-4.8.30/test/rep045script.tcl
new file mode 100644
index 0000000..966760e
--- /dev/null
+++ b/db-4.8.30/test/rep045script.tcl
@@ -0,0 +1,142 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Rep045 script - replication with version dbs.
+#
+# Usage: rep045script clientdir vfile databases_in_memory
+# clientdir: client env directory
+# vfile: name of version file
+# databases_in_memory: are we using named in-memory databases?
+#
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+source $test_path/reputils.tcl
+
+set usage "rep045script clientdir vfile databases_in_memory"
+
+# Verify usage
+if { $argc != 3 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set clientdir [ lindex $argv 0 ]
+set vfile [ lindex $argv 1 ]
+global databases_in_memory
+set databases_in_memory [ lindex $argv 2 ]
+set niter 50
+
+# Join the queue env. We assume the rep test convention of
+# placing the messages in $testdir/MSGQUEUEDIR.
+set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR]
+error_check_good script_qenv_open [is_valid_env $queueenv] TRUE
+
+# We need to set up our own machids.
+repladd 3
+
+# Join the client env.
+set cl_cmd "berkdb_env_noerr -home $clientdir \
+ -txn -rep_client -rep_transport \[list 3 replsend\]"
+# set cl_cmd "berkdb_env_noerr -home $clientdir \
+# -verbose {rep on} -errfile /dev/stderr \
+# -txn -rep_client -rep_transport \[list 3 replsend\]"
+set clientenv [eval $cl_cmd]
+error_check_good script_cenv_open [is_valid_env $clientenv] TRUE
+
+# Start up deadlock detector.
+set dpid [exec $util_path/db_deadlock \
+ -a o -v -t 5 -h $clientdir >& $testdir/dd.out &]
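+# db_deadlock flags: -a o aborts the oldest locker, -v is verbose, -t 5
+# re-runs the detector every 5 seconds, and -h names the environment home.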
+
+# Initialize version number. Don't try to open the first
+# version database until the master has completed setting it up.
+set version 0
+while {[catch {berkdb_open_noerr -env $clientenv -rdonly $vfile} vdb]} {
+ puts "FAIL: vdb open failed: $vdb"
+ tclsleep 1
+}
+
+while { $version == 0 } {
+ tclsleep 1
+ if { [catch {$vdb get VERSION} res] } {
+ # If we encounter an error, check what kind of
+ # error it is.
+ if { [is_substr $res DB_LOCK_DEADLOCK] == 1 } {
+ # We're deadlocked. Just wait for the
+ # deadlock detector to break the deadlock.
+ } elseif { [is_substr $res DB_REP_HANDLE_DEAD] == 1 } {
+ # Handle is dead. Get a new handle.
+ error_check_good vdb_close [$vdb close] 0
+ set vdb [eval berkdb_open -env $clientenv\
+ -rdonly $vfile]
+ } else {
+ # We got something we didn't expect.
+ puts "FAIL: Trying to get version, got $res"
+ break
+ }
+ } else {
+ # No error was encountered.
+ set version [lindex [lindex $res 0] 1]
+ }
+}
+error_check_good close_vdb [$vdb close] 0
+set dbfile db.$version
+
+# Open completed database version $version.
+if {[catch {berkdb_open -rdonly -env $clientenv $dbfile} db]} {
+ puts "FAIL: db open failed: $db"
+}
+error_check_good db_open [is_valid_db $db] TRUE
+
+# While parent process is not done, read from current database.
+# Periodically check version and update current database when
+# necessary.
+while { 1 } {
+ set dbc [$db cursor]
+ set i 0
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first] } { $i < $niter } \
+ { set dbt [$dbc get -next] } {
+ incr i
+ }
+ error_check_good cursor_close [$dbc close] 0
+
+ while {[catch {berkdb_open -env $clientenv -rdonly $vfile} vdb]} {
+ puts "open failed: vdb is $vdb"
+ tclsleep 1
+ }
+ set ret [$vdb get VERSION]
+
+ set newversion [lindex [lindex $ret 0] 1]
+ error_check_good close_vdb [$vdb close] 0
+ error_check_bad check_newversion $newversion ""
+ if { $newversion != $version } {
+ if { $newversion == "DONE" } {
+ break
+ } elseif { $newversion == 0 } {
+ puts "FAIL: version has reverted to 0"
+ continue
+ } else {
+ error_check_good db_close [$db close] 0
+ set version $newversion
+ set dbfile db.$version
+ while {[catch \
+ {berkdb_open -env $clientenv -rdonly $dbfile} db]} {
+ puts "db open of new db failed: $db"
+ tclsleep 1
+ }
+ error_check_good db_open [is_valid_db $db] TRUE
+ }
+ }
+
+ # Pause a few seconds to allow the parent to do some work.
+ tclsleep 3
+}
+
+# Clean up.
+error_check_good kill_deadlock_detector [tclkill $dpid] ""
+error_check_good db_close [$db close] 0
+error_check_good script_client_close [$clientenv close] 0
diff --git a/db-4.8.30/test/rep046.tcl b/db-4.8.30/test/rep046.tcl
new file mode 100644
index 0000000..d1622b9
--- /dev/null
+++ b/db-4.8.30/test/rep046.tcl
@@ -0,0 +1,339 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep046
+# TEST Replication and basic bulk transfer.
+# TEST Set bulk transfer replication option.
+# TEST Run long txns on master and then commit. Process on client
+# TEST and verify contents. Run a very long txn so that logging
+# TEST must send the log. Process and verify on client.
+#
+proc rep046 { method { nentries 200 } { tnum "046" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 3]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ set throttle { "throttle" "" }
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping test with -recover for \
+ in-memory logs."
+ continue
+ }
+ foreach t $throttle {
+ puts "Rep$tnum ($method $r $t):\
+ Replication and bulk transfer $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client 0 logs are [lindex $l 1]"
+ puts "Rep$tnum: Client 1 logs are [lindex $l 2]"
+ rep046_sub $method $nentries $tnum $l $r \
+ $t $args
+ }
+ }
+ }
+}
+
+proc rep046_sub { method niter tnum logset recargs throttle largs } {
+ global overflowword1
+ global overflowword2
+ global databases_in_memory
+ global repfiles_in_memory
+ global testdir
+ global util_path
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set orig_tdir $testdir
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set c2_logtype [lindex $logset 2]
+
+ set in_memory_log \
+ [expr { $m_logtype == "in-memory" || $c_logtype == "in-memory" || \
+ $c2_logtype == "in-memory" }]
+
+	# In-memory logs require a large log buffer, and cannot
+	# be used with -txn nosync.  Adjust the args for master
+	# and client.
+	# This test has a long transaction; allocate a larger log
+	# buffer for the in-memory test.
+ set m_logargs [adjust_logargs $m_logtype [expr 20 * 1024 * 1024]]
+ set c_logargs [adjust_logargs $c_logtype [expr 20 * 1024 * 1024]]
+ set c2_logargs [adjust_logargs $c2_logtype [expr 20 * 1024 * 1024]]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+
+ # If replication files are in-memory we'll need a bigger cache.
+ set cacheargs ""
+ if { $repfiles_in_memory } {
+ set cachesize [expr 8 * (1024 * 1024)]
+ set cacheargs "-cachesize {0 $cachesize 1} "
+ }
+
+ set bigniter [expr 10000 - [expr 2 * $niter]]
+ set lkmax [expr $bigniter * 2]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \
+ $repmemargs $cacheargs \
+ $verbargs -lock_max_locks 10000 -lock_max_objects 10000 \
+ -errpfx MASTER -home $masterdir -rep_master -rep_transport \
+ \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ $repmemargs $cacheargs \
+ $verbargs -home $clientdir -errpfx CLIENT \
+ -lock_max_locks 10000 -lock_max_objects 10000 \
+ -rep_client -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs]
+
+ if { $throttle == "throttle" } {
+ set clientdir2 $testdir/CLIENTDIR2
+ file mkdir $clientdir2
+ repladd 3
+ set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs $verbargs \
+ $repmemargs $cacheargs \
+ $c2_logargs -home $clientdir2 -errpfx CLIENT2 \
+ -lock_max_locks 10000 -lock_max_objects 10000 \
+ -rep_client -rep_transport \[list 3 replsend\]"
+ set cl2env [eval $cl2_envcmd $recargs]
+ set envlist "{$masterenv 1} {$clientenv 2} {$cl2env 3}"
+ #
+ # Turn throttling on in master
+ #
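+		# rep_limit {gbytes bytes}: cap the data the master sends in
+		# response to a single message at 32KB, which should later show
+		# up as "Transmission limited" events in rep_stat.
+		#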
+ error_check_good thr [$masterenv rep_limit 0 [expr 32 * 1024]] 0
+ } else {
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ }
+ # Bring the client online by processing the startup messages.
+ process_msgs $envlist
+
+ #
+ # Turn on bulk processing now on the master.
+ #
+ error_check_good set_bulk [$masterenv rep_config {bulk on}] 0
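+	# With bulk transfer on, the master batches many log records into a
+	# single buffer before transmitting; the "Bulk records stored" and
+	# "Bulk buffer transfers" rep_stat counters below verify that path.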
+
+ puts "\tRep$tnum.a: Create and open master database"
+ # Set up databases as in-memory or on-disk.
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+
+ set omethod [convert_method $method]
+ set masterdb [eval {berkdb_open_noerr -env $masterenv -auto_commit \
+ -create -mode 0644} $largs $omethod $dbname]
+ error_check_good dbopen [is_valid_db $masterdb] TRUE
+
+ # Process database.
+ process_msgs $envlist
+
+ # Run a modified test001 in the master (and update clients).
+ puts "\tRep$tnum.b: Basic long running txn"
+ set bulkrec1 [stat_field $masterenv rep_stat "Bulk records stored"]
+ set bulkxfer1 [stat_field $masterenv rep_stat "Bulk buffer transfers"]
+
+ set overflowword1 "0"
+ set overflowword2 "0"
+ rep_test_bulk $method $masterenv $masterdb $niter 0 0
+ process_msgs $envlist
+ set bulkrec2 [stat_field $masterenv rep_stat "Bulk records stored"]
+ set bulkxfer2 [stat_field $masterenv rep_stat "Bulk buffer transfers"]
+ error_check_good recstat [expr $bulkrec2 > $bulkrec1] 1
+ error_check_good xferstat [expr $bulkxfer2 > $bulkxfer1] 1
+ rep_verify $masterdir $masterenv\
+ $clientdir $clientenv $in_memory_log 1 1
+
+ puts "\tRep$tnum.c: Very long txn"
+ # Determine whether this build is configured with --enable-debug_rop
+ # or --enable-debug_wop.
+ set conf [berkdb getconfig]
+ set debug_rop_wop 0
+ if { [is_substr $conf "debug_rop"] == 1 || \
+ [is_substr $conf "debug_wop"] == 1 } {
+ set debug_rop_wop 1
+ }
+
+	# If debug_rop/wop is set, the test will write more info to the log.
+	# An in-memory log has a smaller "file" size than the large
+	# items written in this part of the test, so skip this section
+	# if any logs are in-memory and debug_rop/wop is set.
+ if { $in_memory_log == 1 && $debug_rop_wop == 1 } {
+ puts "\t\tSkipping for in-memory log and debug_rop/wop"
+ } else {
+ set skip $niter
+ set start $niter
+ set orig $niter
+ set bulkfill1 [stat_field $masterenv rep_stat \
+ "Bulk buffer fills"]
+ rep_test_bulk $method $masterenv $masterdb $bigniter \
+ $start $skip
+ set start [expr $niter + $bigniter]
+ if { $throttle == "throttle" } {
+ #
+ # If we're throttling clear all messages from client 3
+ # so that we force a huge gap that the client will have
+ # to ask for to invoke a rerequest that throttles.
+ #
+ replclear 3
+ set old_thr \
+ [stat_field $masterenv rep_stat \
+ "Transmission limited"]
+ }
+ process_msgs $envlist
+ set bulkfill2 [stat_field $masterenv rep_stat \
+ "Bulk buffer fills"]
+ error_check_good fillstat [expr $bulkfill2 > $bulkfill1] 1
+ rep_verify $masterdir $masterenv $clientdir $clientenv \
+ $in_memory_log 1 1
+ }
+
+ puts "\tRep$tnum.d: Very large data"
+
+	# If debug_rop/wop is set, the test will write the entire item to the log.
+	# An in-memory log has a smaller "file" size than the large
+	# items written in this part of the test, so skip this section
+	# if any logs are in-memory and debug_rop/wop is set.
+ if { $in_memory_log == 1 && $debug_rop_wop == 1 } {
+ puts "\t\tSkipping for in-memory log and debug_rop/wop"
+ } else {
+ set bulkovf1 [stat_field $masterenv rep_stat \
+ "Bulk buffer overflows"]
+ set bulkfill1 [stat_field $masterenv rep_stat \
+ "Bulk buffer fills"]
+ #
+ # Send in '2' exactly because we're sending in the flag to use
+ # the overflow entries. We have 2 overflow entries.
+ # If it's fixed length, we can't overflow. Induce throttling
+ # by putting in a bunch more entries. Create a gap by
+ # forcing a checkpoint record.
+ #
+ $masterenv txn_checkpoint -force
+ process_msgs $envlist
+ tclsleep 1
+ if { [is_fixed_length $method] == 1 } {
+ rep_test_bulk $method $masterenv $masterdb $niter \
+ $start $start 0
+ } else {
+ rep_test_bulk $method $masterenv $masterdb 2 0 0 1
+ }
+ process_msgs $envlist
+
+		# Generally overflows cannot happen because large data gets
+		# broken up into overflow pages, and none will be larger than
+		# the buffer.  However, if we're configured for debug_rop/wop,
+		# the data is logged as-is and the buffer will overflow.
+ #
+ set bulkovf2 [stat_field $masterenv rep_stat \
+ "Bulk buffer overflows"]
+ set bulkfill2 [stat_field $masterenv rep_stat \
+ "Bulk buffer fills"]
+ if { [is_fixed_length $method] == 0 } {
+ error_check_good fillstat1 \
+ [expr $bulkfill2 > $bulkfill1] 1
+ if { $debug_rop_wop == 1 } {
+ error_check_good overflows \
+ [expr $bulkovf2 > $bulkovf1] 1
+ } else {
+ error_check_good no_overflows $bulkovf2 0
+ }
+ }
+ }
+
+ # !!!
+ # Turn off bulk processing now on the master. We need to do
+ # this because some configurations (like debug_rop/wop) will
+ # generate log records when verifying the logs and databases.
+ # We want to control processing those messages.
+ #
+ error_check_good set_bulk [$masterenv rep_config {bulk off}] 0
+
+ if { $in_memory_log == 1 && $debug_rop_wop == 1 } {
+ puts "\t\tSkipping for in-memory log and debug_rop/wop"
+ } else {
+ rep_verify $masterdir $masterenv $clientdir $clientenv \
+ $in_memory_log
+
+ if { $throttle == "throttle" } {
+ puts "\tRep$tnum.e: Verify throttling."
+ set new_thr \
+ [stat_field $masterenv rep_stat \
+ "Transmission limited"]
+ error_check_bad nthrottles1 $new_thr -1
+ error_check_bad nthrottles0 $new_thr 0
+ error_check_good nthrottles \
+ [expr $old_thr < $new_thr] 1
+ process_msgs $envlist
+ rep_verify $masterdir $masterenv $clientdir2 $cl2env \
+ $in_memory_log
+ }
+ }
+
+ if { $throttle == "throttle" } {
+ error_check_good cclose [$cl2env close] 0
+ }
+
+ error_check_good dbclose [$masterdb close] 0
+ error_check_good mclose [$masterenv close] 0
+ error_check_good cclose [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep047.tcl b/db-4.8.30/test/rep047.tcl
new file mode 100644
index 0000000..26a96fb
--- /dev/null
+++ b/db-4.8.30/test/rep047.tcl
@@ -0,0 +1,266 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep047
+# TEST Replication and log gap bulk transfers.
+# TEST Set bulk transfer replication option.
+# TEST Run test. Start a new client (to test ALL_REQ and bulk).
+# TEST Run small test again. Clear messages for 1 client.
+# TEST Run small test again to test LOG_REQ gap processing and bulk.
+# TEST Process and verify on clients.
+#
+proc rep047 { method { nentries 200 } { tnum "047" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 3]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+	# Run the body of the test with and without recovery.
+	# Skip recovery with in-memory logging - it doesn't make sense.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r):\
+ Replication and resend bulk transfer $msg"
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ puts "Rep$tnum: Client 2 logs are [lindex $l 2]"
+ rep047_sub $method $nentries $tnum $l $r $args
+ }
+ }
+}
+
+proc rep047_sub { method niter tnum logset recargs largs } {
+ global testdir
+ global util_path
+ global overflowword1 overflowword2
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set overflowword1 "0"
+ set overflowword2 "0"
+ set orig_tdir $testdir
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set c2_logtype [lindex $logset 2]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+
+ set in_memory_log \
+ [expr { $m_logtype == "in-memory" || $c_logtype == "in-memory" || \
+ $c2_logtype == "in-memory" }]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env -create $m_txnargs $m_logargs \
+ $verbargs -errpfx MASTER -home $masterdir $repmemargs \
+ -rep_master -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open two clients.
+ repladd 2
+ set cl_envcmd "berkdb_env -create $c_txnargs $c_logargs \
+ $verbargs -errpfx CLIENT -home $clientdir $repmemargs \
+ -rep_client -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ repladd 3
+ set cl2_envcmd "berkdb_env -create $c2_txnargs $c2_logargs \
+ $verbargs -errpfx CLIENT2 -home $clientdir2 $repmemargs \
+ -rep_client -rep_transport \[list 3 replsend\]"
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ error_check_good set_bulk [$masterenv rep_config {bulk on}] 0
+
+ puts "\tRep$tnum.a: Create and open master database"
+
+ # Set up databases as in-memory or on-disk.
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+
+ set omethod [convert_method $method]
+ set masterdb [eval {berkdb_open -env $masterenv -auto_commit \
+ -create -mode 0644} $largs $omethod $dbname]
+ error_check_good dbopen [is_valid_db $masterdb] TRUE
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Run a modified test001 in the master (and update clients).
+ puts "\tRep$tnum.b: Basic long running txn"
+ rep_test_bulk $method $masterenv $masterdb $niter 0 0 0
+ process_msgs $envlist
+ rep_verify $masterdir $masterenv\
+ $clientdir $clientenv $in_memory_log 1 1
+
+ # Clean up after rep_verify: remove the temporary "prlog" file. Now
+ # that a newly joining client uses internal init, when the master scans
+ # its directory for database files it complains about prlog not looking
+ # like a proper db. This is harmless, but it does put a distracting
+ # error message into the test output.
+ #
+ file delete $masterdir/prlog
+
+ puts "\tRep$tnum.c: Bring new client online"
+ replclear 3
+ set bulkrec1 [stat_field $masterenv rep_stat "Bulk records stored"]
+ set bulkxfer1 [stat_field $masterenv rep_stat "Bulk buffer transfers"]
+ set clientenv2 [eval $cl2_envcmd $recargs]
+ error_check_good client_env [is_valid_env $clientenv2] TRUE
+ set envlist "{$masterenv 1} {$clientenv 2} {$clientenv2 3}"
+ process_msgs $envlist
+
+ #
+ # We know we added $niter items to the database so there should be
+ # at least $niter records stored to the log. Verify that
+ # when we brought client 2 online, we sent at least $niter more
+ # records via bulk.
+ #
+ set bulkrec2 [stat_field $masterenv rep_stat "Bulk records stored"]
+ set bulkxfer2 [stat_field $masterenv rep_stat "Bulk buffer transfers"]
+ set recstat [expr $bulkrec2 > [expr $bulkrec1 + $niter]]
+ error_check_good recstat $recstat 1
+ error_check_good xferstat [expr $bulkxfer2 > $bulkxfer1] 1
+ puts "\tRep$tnum.c.0: Take new client offline"
+
+ puts "\tRep$tnum.d: Run small test creating a log gap"
+ set skip $niter
+ set start $niter
+ set niter 10
+ rep_test_bulk $method $masterenv $masterdb $niter $start $skip 0
+ #
+ # Skip and clear messages for client 2.
+ #
+ replclear 3
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.e: Bring new client online again"
+ set envlist "{$masterenv 1} {$clientenv 2} {$clientenv2 3}"
+ #
+ # Since we're relying on the client to detect a gap and request missing
+ # records, reset gap parameters to small values. Otherwise,
+ # "wait_recs" is still set at its maximum "high" value, due to this
+ # client having been through an internal init. Send a record to
+ # create a real gap and not an end-of-record stream pause.
+ #
+ $masterenv txn_checkpoint -force
+ $clientenv2 rep_request 4000 128000
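+	# (rep_request sets the client's minimum and maximum re-request
+	# thresholds for missing records; small values here make the client
+	# notice the gap quickly.  Exact units depend on the release.)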
+ process_msgs $envlist
+ tclsleep 1
+ set bulkrec1 [stat_field $masterenv rep_stat "Bulk records stored"]
+ set bulkxfer1 [stat_field $masterenv rep_stat "Bulk buffer transfers"]
+ set skip [expr $skip + $niter]
+ set start $skip
+ rep_test_bulk $method $masterenv $masterdb $niter $start $skip 0
+
+ process_msgs $envlist
+ #
+ # We know we added 2*$niter items to the database so there should be
+ # at least 2*$niter records stored to the log. Verify that
+ # when we brought client 2 online, we sent at least 2*$niter more
+ # records via bulk.
+ #
+ set bulkrec2 [stat_field $masterenv rep_stat "Bulk records stored"]
+ set bulkxfer2 [stat_field $masterenv rep_stat "Bulk buffer transfers"]
+ set recstat [expr $bulkrec2 > [expr $bulkrec1 + [expr 2 * $niter]]]
+ error_check_good recstat $recstat 1
+ error_check_good xferstat [expr $bulkxfer2 > $bulkxfer1] 1
+
+ # Turn off bulk processing now on the master. We need to do
+ # this because some configurations (like debug_rop/wop) will
+ # generate log records when verifying the logs and databases.
+ # We want to control processing those messages.
+ #
+ error_check_good set_bulk [$masterenv rep_config {bulk off}] 0
+
+ rep_verify $masterdir $masterenv\
+ $clientdir $clientenv $in_memory_log 1 1
+
+ # Process messages again in case we are running with debug_rop.
+ process_msgs $envlist
+ rep_verify $masterdir $masterenv\
+ $clientdir2 $clientenv2 $in_memory_log 1 1
+
+ error_check_good dbclose [$masterdb close] 0
+ error_check_good mclose [$masterenv close] 0
+ error_check_good cclose [$clientenv close] 0
+ error_check_good c2close [$clientenv2 close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep048.tcl b/db-4.8.30/test/rep048.tcl
new file mode 100644
index 0000000..f941601
--- /dev/null
+++ b/db-4.8.30/test/rep048.tcl
@@ -0,0 +1,186 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep048
+# TEST Replication and log gap bulk transfers.
+# TEST Have two master env handles. Turn bulk on in
+# TEST one (turns it on for both). Turn it off in the other.
+# TEST While toggling, send log records from both handles.
+# TEST Process messages and verify master and client match.
+#
+proc rep048 { method { nentries 3000 } { tnum "048" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping test with -recover for \
+ in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r): Replication\
+ and toggling bulk transfer $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep048_sub $method $nentries $tnum $l $r $args
+ }
+ }
+}
+
+proc rep048_sub { method niter tnum logset recargs largs } {
+ source ./include.tcl
+ global overflowword1
+ global overflowword2
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set orig_tdir $testdir
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+ set overflowword1 "0"
+ set overflowword2 "0"
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $clientdir
+ file mkdir $masterdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ set in_memory_log \
+ [expr { $m_logtype == "in-memory" || $c_logtype == "in-memory" }]
+
+	# In-memory logs require a large log buffer, and cannot
+	# be used with -txn nosync.  Adjust the args for master
+	# and client.
+	# This test has a long transaction; allocate a larger log
+	# buffer for the in-memory test.
+ set m_logargs [adjust_logargs $m_logtype [expr 20 * 1024 * 1024]]
+ set c_logargs [adjust_logargs $c_logtype [expr 20 * 1024 * 1024]]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \
+ -errpfx MASTER $verbargs -home $masterdir $repmemargs \
+ -rep_master -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open a client.
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ -errpfx CLIENT $verbargs -home $clientdir $repmemargs \
+ -rep_client -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.a: Create and open master databases"
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+
+ set omethod [convert_method $method]
+ set masterdb [eval {berkdb_open_noerr -env $masterenv -auto_commit \
+ -create -mode 0644} $largs $omethod $dbname]
+ error_check_good dbopen [is_valid_db $masterdb] TRUE
+
+ set scrlog $testdir/repscript.log
+ puts "\tRep$tnum.b: Fork child process."
+ set pid [exec $tclsh_path $test_path/wrap.tcl \
+ rep048script.tcl $scrlog $masterdir $databases_in_memory &]
+
+ # Wait for child process to start up.
+ while { 1 } {
+ if { [file exists $masterdir/marker.file] == 0 } {
+ tclsleep 1
+ } else {
+ tclsleep 1
+ break
+ }
+ }
+ # Run a modified test001 in the master (and update clients).
+	# Call it several times to make sure that we get descheduled.
+ puts "\tRep$tnum.c: Basic long running txn"
+ set div 10
+ set loop [expr $niter / $div]
+ set start 0
+ for { set i 0 } { $i < $div } {incr i} {
+ rep_test_bulk $method $masterenv $masterdb $loop $start $start 0
+ process_msgs $envlist
+ set start [expr $start + $loop]
+ tclsleep 1
+ }
+ error_check_good dbclose [$masterdb close] 0
+ set marker [open $masterdir/done.file w]
+ close $marker
+
+ set bulkxfer1 [stat_field $masterenv rep_stat "Bulk buffer transfers"]
+ error_check_bad bulk $bulkxfer1 0
+
+ puts "\tRep$tnum.d: Waiting for child ..."
+ # Watch until the child is done.
+ watch_procs $pid 5
+ process_msgs $envlist
+ set childname "child.db"
+
+ rep_verify $masterdir $masterenv $clientdir $clientenv \
+ $in_memory_log 1 1
+ rep_verify $masterdir $masterenv $clientdir $clientenv \
+ 0 1 0 $childname
+
+ error_check_good mclose [$masterenv close] 0
+ error_check_good cclose [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep048script.tcl b/db-4.8.30/test/rep048script.tcl
new file mode 100644
index 0000000..99a8938
--- /dev/null
+++ b/db-4.8.30/test/rep048script.tcl
@@ -0,0 +1,84 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Rep048 script - toggle bulk transfer while updates are going on.
+
+# Usage: repscript masterdir databases_in_memory
+# masterdir: master env directory
+# databases_in_memory: are we using named in-memory databases?
+#
+source ./include.tcl
+source $test_path/reputils.tcl
+
+set usage "repscript masterdir databases_in_memory"
+
+# Verify usage
+if { $argc != 2 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set masterdir [ lindex $argv 0 ]
+global databases_in_memory
+set databases_in_memory [ lindex $argv 1 ]
+
+# Join the queue env. We assume the rep test convention of
+# placing the messages in $testdir/MSGQUEUEDIR.
+set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR]
+error_check_good script_qenv_open [is_valid_env $queueenv] TRUE
+
+#
+# We need to set up our own machids.
+# Add 1 for master env id, and 2 for the clientenv id.
+#
+repladd 1
+repladd 2
+
+# Join the master env.
+set ma_cmd "berkdb_env_noerr -home $masterdir \
+ -txn -rep_master -rep_transport \[list 1 replsend\]"
+# set ma_cmd "berkdb_env_noerr -home $masterdir \
+# -verbose {rep on} -errfile /dev/stderr \
+# -txn -rep_master -rep_transport \[list 1 replsend\]"
+set masterenv [eval $ma_cmd]
+error_check_good script_menv_open [is_valid_env $masterenv] TRUE
+
+puts "Master open"
+if { $databases_in_memory } {
+ set dbname { "" "child.db" }
+} else {
+ set dbname "child.db"
+}
+set db [eval "berkdb_open -create -btree -auto_commit -env $masterenv $dbname"]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+# Create marker file
+set marker [open $masterdir/marker.file w]
+close $marker
+
+#
+# Keep toggling until the parent indicates it's done.
+#
+set tog "on"
+for { set i 0 } { [file exists $masterdir/done.file] == 0 } { incr i } {
+	puts "Iter $i: Turn bulk $tog"
+ error_check_good bulk$tog [$masterenv rep_config [list bulk $tog]] 0
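+	# Bulk is an env-wide setting, so toggling it through this child's
+	# handle also changes what the parent's master handle sends.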
+ set t [$masterenv txn]
+ error_check_good db_put \
+ [eval $db put -txn $t $i data$i] 0
+ error_check_good txn_commit [$t commit] 0
+ if { $tog == "on" } {
+ set tog "off"
+ } else {
+ set tog "on"
+ }
+ tclsleep 1
+}
+# Close the envs
+error_check_good script_db_close [$db close] 0
+error_check_good script_master_close [$masterenv close] 0
+puts "\tRepscript completed successfully"
diff --git a/db-4.8.30/test/rep049.tcl b/db-4.8.30/test/rep049.tcl
new file mode 100644
index 0000000..3a62089
--- /dev/null
+++ b/db-4.8.30/test/rep049.tcl
@@ -0,0 +1,240 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep049
+# TEST Replication and delay syncing clients - basic test.
+# TEST
+# TEST Open and start up a master and two clients. Turn on delay sync
+# TEST in the delayed client. Change master, add data and process messages.
+# TEST Verify delayed client does not match. Make additional changes and
+# TEST update the delayed client. Verify all match.
+# TEST Add in a fresh delayed client to test delay of ALL_REQ.
+# TEST Process startup messages and verify fresh client has no database.
+# TEST Sync and verify fresh client matches.
+#
+proc rep049 { method { niter 10 } { tnum "049" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 4]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($r): Replication\
+ and ($method) delayed sync-up $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Swap Client logs are [lindex $l 1]"
+ puts "Rep$tnum: Delay Client logs are [lindex $l 2]"
+ puts "Rep$tnum: Fresh Client logs are [lindex $l 3]"
+ rep049_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep049_sub { method niter tnum logset recargs largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set orig_tdir $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set env1dir $testdir/MASTERDIR
+ set env2dir $testdir/CLIENTDIR
+ set delaycldir $testdir/DELAYCLDIR
+ set freshcldir $testdir/FRESHCLDIR
+ file mkdir $env1dir
+ file mkdir $env2dir
+ file mkdir $delaycldir
+ file mkdir $freshcldir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set dc_logtype [lindex $logset 2]
+ set fc_logtype [lindex $logset 3]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set dc_logargs [adjust_logargs $dc_logtype]
+ set fc_logargs [adjust_logargs $fc_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set dc_txnargs [adjust_txnargs $dc_logtype]
+ set fc_txnargs [adjust_txnargs $fc_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $verbargs \
+ $repmemargs \
+ $m_logargs -errpfx ENV1 -cachesize {0 4194304 3} \
+ -home $env1dir -rep_transport \[list 1 replsend\]"
+ set env1 [eval $ma_envcmd $recargs -rep_master]
+ error_check_good master_env [is_valid_env $env1] TRUE
+ $env1 rep_limit 0 0
+
+ # Open two clients
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $verbargs \
+ $repmemargs $c_logargs -errpfx ENV2 \
+ -home $env2dir -rep_transport \[list 2 replsend\]"
+ set env2 [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $env2] TRUE
+ $env2 rep_limit 0 0
+
+ repladd 3
+ set dc_envcmd "berkdb_env_noerr -create $dc_txnargs \
+ $repmemargs $verbargs $dc_logargs -errpfx ENV3 \
+ -home $delaycldir -rep_transport \[list 3 replsend\]"
+ set dcenv [eval $dc_envcmd $recargs -rep_client]
+ error_check_good client2_env [is_valid_env $dcenv] TRUE
+ $dcenv rep_limit 0 0
+
+ #
+ # !!!
+ # We're not using this client yet, but put its command up here.
+ # We'll do the repladd and execute this env command later.
+ #
+ set fc_envcmd "berkdb_env_noerr -create $fc_txnargs \
+ $repmemargs $fc_logargs -errpfx ENV4 $verbargs \
+ -home $freshcldir -rep_transport \[list 4 replsend\]"
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$env1 1} {$env2 2} {$dcenv 3}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.a: Run rep_test in master env."
+ set start 0
+ eval rep_test $method $env1 NULL $niter $start $start 0 $largs
+
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Set delayed sync on client. Basic test."
+ error_check_good set_delay [$dcenv rep_config {delayclient on}] 0
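+	# With delayclient on, this client will not sync up with a new
+	# master on its own; it waits until we call rep_sync explicitly.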
+ #
+ # Call sync when we're not delayed. Verify it just returns and
+ # that no messages are generated anywhere.
+ #
+ error_check_good sync1 [$dcenv rep_sync] 0
+ set nproced [proc_msgs_once $envlist NONE err]
+ error_check_good nproced $nproced 0
+
+ puts "\tRep$tnum.c: Swap master/client"
+ error_check_good downgrade [$env1 rep_start -client] 0
+ error_check_good upgrade [$env2 rep_start -master] 0
+
+ process_msgs $envlist
+
+ puts "\tRep$tnum.d: Run rep_test in new master env"
+ set start $niter
+ eval rep_test $method $env2 NULL $niter $start $start 0 $largs
+ process_msgs $envlist
+ #
+ # Delayed client should be different. Former master should be synced.
+ #
+ rep_verify $env2dir $env2 $env1dir $env1 1 1 1
+ rep_verify $env2dir $env2 $delaycldir $dcenv 0 0 0
+
+ puts "\tRep$tnum.e: Sync delayed client"
+ error_check_good rep_sync [$dcenv rep_sync] 0
+ process_msgs $envlist
+ #
+ # Delayed client should be the same now.
+ #
+ rep_verify $env2dir $env2 $delaycldir $dcenv 0 1 1
+
+ puts "\tRep$tnum.f: Run rep_test after sync-up in new master env"
+ set start [expr $start + $niter]
+ eval rep_test $method $env2 NULL $niter $start $start 0 $largs
+ process_msgs $envlist
+ #
+	# Delayed client should be caught up and running fine.
+ #
+ rep_verify $env2dir $env2 $delaycldir $dcenv 0 1 1
+
+ puts "\tRep$tnum.g: Add in a fresh delayed client"
+ repladd 4
+ set fcenv [eval $fc_envcmd $recargs -rep_client]
+ error_check_good client3_env [is_valid_env $fcenv] TRUE
+ error_check_good set_delay [$fcenv rep_config {delayclient on}] 0
+
+ set envlist "{$env1 1} {$env2 2} {$dcenv 3} {$fcenv 4}"
+ process_msgs $envlist
+
+ # Verify that after processing the startup messages, the
+ # new client has no database and unmatched logs.
+ set dbname "test.db"
+ error_check_bad clientdb [file exists $freshcldir/$dbname] 1
+ rep_verify $env2dir $env2 $freshcldir $fcenv 0 0 1 NULL
+
+ puts "\tRep$tnum.h: Sync delayed client"
+ error_check_good rep_sync [$fcenv rep_sync] 0
+ process_msgs $envlist
+ #
+ # Delayed client should be the same now.
+ #
+ rep_verify $env2dir $env2 $freshcldir $fcenv 0 1 1
+
+ puts "\tRep$tnum.i: Closing"
+ error_check_good env1_close [$env1 close] 0
+ error_check_good env2_close [$env2 close] 0
+ error_check_good dc_close [$dcenv close] 0
+ error_check_good fc_close [$fcenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+ set testdir $orig_tdir
+ return
+}
diff --git a/db-4.8.30/test/rep050.tcl b/db-4.8.30/test/rep050.tcl
new file mode 100644
index 0000000..d3b4820
--- /dev/null
+++ b/db-4.8.30/test/rep050.tcl
@@ -0,0 +1,362 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep050
+# TEST Replication and delay syncing clients - change master test.
+# TEST
+# TEST Open and start up master and 4 clients. Turn on delay for 3 clients.
+# TEST Switch masters, add data and verify delayed clients are out of date.
+# TEST Make additional changes to master. And change masters again.
+# TEST Sync/update delayed client and verify. The 4th client is a brand
+# TEST new delayed client added in to test the non-verify path.
+# TEST
+# TEST Then test two different things:
+# TEST 1. Swap master again while clients are still delayed.
+# TEST 2. Swap master again while sync is proceeding for one client.
+#
+proc rep050 { method { niter 10 } { tnum "050" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 5]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($r): Replication\
+ and ($method) delayed sync-up $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client 0 logs are [lindex $l 1]"
+ puts "Rep$tnum: Delay Client 1 logs are [lindex $l 2]"
+ puts "Rep$tnum: Delay Client 2 logs are [lindex $l 3]"
+ puts "Rep$tnum: Delay Client 3 logs are [lindex $l 4]"
+ rep050_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep050_sub { method niter tnum logset recargs largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set orig_tdir $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set env1dir $testdir/MASTERDIR
+ set env2dir $testdir/CLIENTDIR
+ set delaycldir1 $testdir/DELAYCLDIR.1
+ set delaycldir2 $testdir/DELAYCLDIR.2
+ set delaycldir3 $testdir/DELAYCLDIR.3
+ file mkdir $env1dir
+ file mkdir $env2dir
+ file mkdir $delaycldir1
+ file mkdir $delaycldir2
+ file mkdir $delaycldir3
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set dc1_logtype [lindex $logset 2]
+ set dc2_logtype [lindex $logset 3]
+ set dc3_logtype [lindex $logset 4]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set dc1_logargs [adjust_logargs $dc1_logtype]
+ set dc2_logargs [adjust_logargs $dc2_logtype]
+ set dc3_logargs [adjust_logargs $dc3_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set dc1_txnargs [adjust_txnargs $dc1_logtype]
+ set dc2_txnargs [adjust_txnargs $dc2_logtype]
+ set dc3_txnargs [adjust_txnargs $dc3_logtype]
+
+ #
+ # XXX rep050 delayed sync-up but change master:
+ # while client is delayed.
+ # while client is in the middle of delayed sync.
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ $m_logargs -errpfx ENV1 $verbargs $repmemargs \
+ -home $env1dir -rep_transport \[list 1 replsend\]"
+ set env1 [eval $ma_envcmd $recargs -rep_master]
+ $env1 rep_limit 0 0
+
+ # Open two clients
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $c_logargs -errpfx ENV2 $verbargs $repmemargs \
+ -cachesize {0 2097152 2} \
+ -home $env2dir -rep_transport \[list 2 replsend\]"
+ set env2 [eval $cl_envcmd $recargs -rep_client]
+ $env2 rep_limit 0 0
+
+ repladd 3
+ set dc1_envcmd "berkdb_env_noerr -create $dc1_txnargs \
+ $dc1_logargs -errpfx ENV3 $verbargs $repmemargs \
+ -home $delaycldir1 -rep_transport \[list 3 replsend\]"
+ set dc1env [eval $dc1_envcmd $recargs -rep_client]
+ $dc1env rep_limit 0 0
+
+ repladd 4
+ set dc2_envcmd "berkdb_env_noerr -create $dc2_txnargs \
+ $dc2_logargs -errpfx ENV4 $verbargs $repmemargs \
+ -home $delaycldir2 -rep_transport \[list 4 replsend\]"
+ set dc2env [eval $dc2_envcmd $recargs -rep_client]
+ $dc2env rep_limit 0 0
+
+ repladd 5
+ set dc3_envcmd "berkdb_env_noerr -create $dc3_txnargs \
+ $dc3_logargs -errpfx ENV5 $verbargs $repmemargs \
+ -home $delaycldir3 -rep_transport \[list 5 replsend\]"
+
+ # Bring the clients online by processing the startup messages.
+ # !!!
+ # NOTE: We set up dc3_envcmd but we do not open the env now.
+ # Therefore dc3env is not part of the envlist. However, since
+ # we did the repladd broadcast messages will be sent to it,
+ # but we will replclear before we start the env.
+ #
+ set envlist "{$env1 1} {$env2 2} {$dc1env 3} {$dc2env 4}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.a: Run rep_test in master env."
+ set start 0
+ eval rep_test $method $env1 NULL $niter $start $start 0 $largs
+
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Set delayed sync on clients 2 and 3"
+ error_check_good set_delay [$dc1env rep_config {delayclient on}] 0
+ error_check_good set_delay [$dc2env rep_config {delayclient on}] 0
+
+ set oplist { "delayed" "syncing" }
+
+ set masterenv $env1
+ set mid 1
+ set mdir $env1dir
+ set clientenv $env2
+ set cid 2
+ set cdir $env2dir
+ foreach op $oplist {
+ # Swap all the info we need.
+ set tmp $masterenv
+ set masterenv $clientenv
+ set clientenv $tmp
+
+ set tmp $mdir
+ set mdir $cdir
+		set cdir $tmp
+
+ set tmp $mid
+ set mid $cid
+ set cid $tmp
+
+ puts "\tRep$tnum.c: Swap master/client ($op)"
+ error_check_good downgrade [$clientenv rep_start -client] 0
+ error_check_good upgrade [$masterenv rep_start -master] 0
+ process_msgs $envlist
+
+ #
+ # !!!
+ # At this point, clients 2 and 3 should have DELAY set.
+		# We should probably add a field to rep_stat
+		# to indicate that and test that here.
+ #
+ puts "\tRep$tnum.d: Run rep_test in new master env"
+ set start [expr $start + $niter]
+ eval rep_test $method $env2 NULL $niter $start $start 0 $largs
+ process_msgs $envlist
+
+ #
+ # Delayed clients should be different.
+		# Former master should be synced.
+ #
+ rep_verify $mdir $masterenv $cdir $clientenv 0 1 1
+ rep_verify $mdir $masterenv $delaycldir1 $dc1env 0 0 0
+ rep_verify $mdir $masterenv $delaycldir2 $dc2env 0 0 0
+
+ #
+ # Run rep_test again, but don't process on former master.
+ # This makes the master/client different from each other.
+ #
+ puts "\tRep$tnum.e: Run rep_test in new master env only"
+ set start [expr $start + $niter]
+ eval rep_test \
+ $method $masterenv NULL $niter $start $start 0 $largs
+ replclear $cid
+ replclear 3
+ replclear 4
+ replclear 5
+
+ puts "\tRep$tnum.f: Start 4th, clean delayed client."
+ set dc3env [eval $dc3_envcmd $recargs -rep_client]
+ error_check_good client4_env [is_valid_env $dc3env] TRUE
+ $dc3env rep_limit 0 0
+ error_check_good set_delay [$dc3env rep_config \
+ {delayclient on}] 0
+ set envlist "{$env1 1} {$env2 2} {$dc1env 3} \
+ {$dc2env 4} {$dc3env 5}"
+ process_msgs $envlist
+
+ #
+ # Now we have a master at point 1, a former master,
+ # now client at point 2, and two delayed clients at point 3.
+		# If 'delayed', swap masters now, while the clients are
+ # in the delayed state but not syncing yet.
+ # If 'syncing', first call rep_sync, and begin syncing the
+ # clients, then swap masters in the middle of that process.
+ #
+ set nextlet "g"
+ if { $op == "delayed" } {
+ # Swap all the info we need.
+ set tmp $masterenv
+ set masterenv $clientenv
+ set clientenv $tmp
+
+ set tmp $mdir
+ set mdir $cdir
+			set cdir $tmp
+
+ set tmp $mid
+ set mid $cid
+ set cid $tmp
+
+ puts "\tRep$tnum.g: Swap master/client while delayed"
+ set nextlet "h"
+ error_check_good downgrade \
+ [$clientenv rep_start -client] 0
+ error_check_good upgrade \
+ [$masterenv rep_start -master] 0
+ process_msgs $envlist
+ }
+ puts "\tRep$tnum.$nextlet: Run rep_test and sync delayed client"
+ set start [expr $start + $niter]
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ process_msgs $envlist
+ error_check_good rep_sync [$dc1env rep_sync] 0
+ error_check_good rep_sync [$dc3env rep_sync] 0
+ if { $op == "syncing" } {
+ #
+ # Process messages twice to get us into syncing,
+ # but not enough to complete it. Then swap.
+ #
+ set nproced [proc_msgs_once $envlist NONE err]
+ set nproced [proc_msgs_once $envlist NONE err]
+
+ # Swap all the info we need.
+ set tmp $masterenv
+ set masterenv $clientenv
+ set clientenv $tmp
+
+ set tmp $mdir
+ set mdir $cdir
+			set cdir $tmp
+
+ set tmp $mid
+ set mid $cid
+ set cid $tmp
+
+ puts "\tRep$tnum.h: Swap master/client while syncing"
+ error_check_good downgrade \
+ [$clientenv rep_start -client] 0
+ error_check_good upgrade \
+ [$masterenv rep_start -master] 0
+ }
+ #
+ # Now process all messages and verify.
+ #
+ puts "\tRep$tnum.i: Process all messages and verify."
+ process_msgs $envlist
+
+ #
+ # If we swapped during the last syncing, we need to call
+ # rep_sync again because the master changed again.
+ #
+ if { $op == "syncing" } {
+ error_check_good rep_sync [$dc1env rep_sync] 0
+ error_check_good rep_sync [$dc3env rep_sync] 0
+ process_msgs $envlist
+ }
+
+ #
+ # Delayed client should be the same now.
+ #
+ rep_verify $mdir $masterenv $delaycldir1 $dc1env 0 1 1
+ rep_verify $mdir $masterenv $delaycldir3 $dc3env 0 1 1
+ rep_verify $mdir $masterenv $delaycldir2 $dc2env 0 0 0
+ error_check_good dc3_close [$dc3env close] 0
+ env_cleanup $delaycldir3
+ set envlist "{$env1 1} {$env2 2} {$dc1env 3} {$dc2env 4}"
+
+ }
+ puts "\tRep$tnum.j: Sync up 2nd delayed client and verify."
+ error_check_good rep_sync [$dc2env rep_sync] 0
+ process_msgs $envlist
+ rep_verify $mdir $masterenv $delaycldir2 $dc2env 0 1 1
+
+ puts "\tRep$tnum.k: Closing"
+ error_check_good env1_close [$env1 close] 0
+ error_check_good env2_close [$env2 close] 0
+ error_check_good dc1_close [$dc1env close] 0
+ error_check_good dc2_close [$dc2env close] 0
+ replclose $testdir/MSGQUEUEDIR
+ set testdir $orig_tdir
+ return
+}
diff --git a/db-4.8.30/test/rep051.tcl b/db-4.8.30/test/rep051.tcl
new file mode 100644
index 0000000..1446e89
--- /dev/null
+++ b/db-4.8.30/test/rep051.tcl
@@ -0,0 +1,243 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep051
+# TEST Test of compaction with replication.
+# TEST
+# TEST Run rep_test in a replicated master environment.
+# TEST Delete a large number of entries and compact with -freespace.
+# TEST Propagate the changes to the client and make sure client and
+# TEST master match.
+
+proc rep051 { method { niter 1000 } { tnum "051" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Compaction is an option for btree and recno databases only.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_btree $method] == 1 || \
+ [is_recno $method] == 1 } {
+ lappend test_methods $method
+ }
+ }
+ return $test_methods
+ }
+ if { [is_hash $method] == 1 || [is_queue $method] == 1 } {
+ puts "Skipping test$tnum for method $method."
+ return
+ }
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run tests with and without recovery. If we're doing testing
+ # of in-memory logging, skip the combination of recovery
+ # and in-memory logging -- it doesn't make sense.
+ set logsets [create_logsets 2]
+ set saved_args $args
+
+ foreach recopt $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $recopt == "-recover" && $logindex != -1 } {
+ puts "Skipping test \
+ with -recover for in-memory logs."
+ continue
+ }
+ set envargs ""
+ set args $saved_args
+ puts "Rep$tnum: Replication with\
+ compaction ($method $recopt) $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep051_sub $method \
+ $niter $tnum $envargs $l $recopt $args
+ }
+ }
+}
+
+proc rep051_sub { method niter tnum envargs logset recargs largs } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ set verify_subset \
+ [expr { $m_logtype == "in-memory" || $c_logtype == "in-memory" }]
+
+	# In-memory logs require a large log buffer, and cannot
+	# be used with -txn nosync. Adjust the args for master
+	# and client.
+	# This test has a long transaction, so allocate a larger
+	# log buffer for the in-memory case.
+ set m_logargs [adjust_logargs $m_logtype [expr 2 * [expr 1024 * 1024]]]
+ set c_logargs [adjust_logargs $c_logtype [expr 2 * [expr 1024 * 1024]]]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
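+	#
+	# For reference: 2 * 1024 * 1024 is a 2MB in-memory log buffer,
+	# presumably sized so that the single long cursor-delete
+	# transaction later in this test fits in the buffer when logs
+	# are kept entirely in memory.
+	#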
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create $verbargs \
+ -log_max 1000000 $envargs $m_logargs $recargs $repmemargs \
+ -home $masterdir -errpfx MASTER $m_txnargs -rep_master \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+
+ # Open a client
+ repladd 2
+ set env_cmd(C) "berkdb_env_noerr -create $verbargs \
+ -log_max 1000000 $envargs $c_logargs $recargs $repmemargs \
+ -home $clientdir -errpfx CLIENT $c_txnargs -rep_client \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $env_cmd(C)]
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Explicitly create the db handle so we can do deletes,
+ # and also to make the page size small.
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+
+ set omethod [convert_method $method]
+ set db [eval {berkdb_open_noerr -env $masterenv -auto_commit\
+ -pagesize 512 -create -mode 0644} $largs $omethod $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Run rep_test in the master and update client.
+ puts "\tRep$tnum.a:\
+ Running rep_test in replicated env ($envargs $recargs)."
+
+ eval rep_test $method $masterenv $db $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ # Verify that contents match.
+ puts "\tRep$tnum.b: Verifying client database contents."
+ rep_verify $masterdir $masterenv\
+ $clientdir $clientenv $verify_subset 1 1
+
+ # Delete most entries. Since some of our methods renumber,
+ # delete starting at $niter and working down to 0.
+ puts "\tRep$tnum.c: Remove most entries, by cursor."
+ set count [expr $niter - 1]
+ set n 20
+ set t [$masterenv txn]
+ error_check_good txn [is_valid_txn $t $masterenv] TRUE
+ set txn "-txn $t"
+
+ set dbc [eval {$db cursor} $txn]
+
+ # Leave every nth item.
+ set dbt [$dbc get -first]
+ while { $count > 0 } {
+ if { [expr $count % $n] != 0 } {
+ error_check_good dbc_del [$dbc del] 0
+ }
+ set dbt [$dbc get -next]
+ incr count -1
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good t_commit [$t commit] 0
+
+ # Open read-only handle on client, so we can call $db stat.
+ set client_db \
+ [eval {berkdb_open_noerr} -env $clientenv -rdonly $dbname]
+ error_check_good client_open [is_valid_db $client_db] TRUE
+
+ # Check database size on both client and master.
+ process_msgs $envlist
+ set master_pages_before [stat_field $db stat "Page count"]
+ set client_pages_before [stat_field $client_db stat "Page count"]
+ error_check_good \
+ pages_match_before $client_pages_before $master_pages_before
+
+ # Compact database.
+ puts "\tRep$tnum.d: Compact database."
+ set t [$masterenv txn]
+ error_check_good txn [is_valid_txn $t $masterenv] TRUE
+ set txn "-txn $t"
+
+ set ret [eval {$db compact} $txn {-freespace}]
+
+ error_check_good t_commit [$t commit] 0
+ error_check_good db_sync [$db sync] 0
+
+ # There will be fewer pages in use after the compact -freespace call.
+ set master_pages_after [stat_field $db stat "Page count"]
+ set page_reduction [expr $master_pages_before - $master_pages_after]
+ error_check_good page_reduction [expr $page_reduction > 0] 1
+
+ # Process messages so the client sees the reduction in pages used.
+ process_msgs $envlist
+
+ set client_pages_after [stat_field $client_db stat "Page count"]
+ error_check_good \
+ pages_match_after $client_pages_after $master_pages_after
+
+ # Close client handle.
+ error_check_good client_handle [$client_db close] 0
+
+ # Reverify.
+ puts "\tRep$tnum.b: Verifying client database contents."
+ rep_verify $masterdir $masterenv\
+ $clientdir $clientenv $verify_subset 1 1
+
+ # Clean up.
+ error_check_good db_close [$db close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep052.tcl b/db-4.8.30/test/rep052.tcl
new file mode 100644
index 0000000..b2cc02a
--- /dev/null
+++ b/db-4.8.30/test/rep052.tcl
@@ -0,0 +1,252 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep052
+# TEST Test of replication with NOWAIT.
+# TEST
+# TEST One master, one client. After initializing
+# TEST everything normally, close client and let the
+# TEST master get ahead -- far enough that the master
+# TEST no longer has the client's last log file.
+# TEST Reopen the client and turn on NOWAIT.
+# TEST Process a few messages to get the client into
+# TEST recovery mode, and verify that lockout occurs
+# TEST on a txn API call (txn_begin) and an env API call.
+# TEST Process all the messages and verify that lockout
+# TEST is over.
+
+proc rep052 { method { niter 200 } { tnum "052" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set saved_args $args
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ set logsets [create_logsets 2]
+ set saved_args $args
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery. Skip
+ # recovery with in-memory logging - it doesn't make sense.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ set envargs ""
+ set args $saved_args
+ puts "Rep$tnum ($method $envargs $r $args):\
+ Test lockouts with REP_NOWAIT $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep052_sub $method $niter $tnum $envargs \
+ $l $r $args
+ }
+ }
+}
+
+proc rep052_sub { method niter tnum envargs logset recargs largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $verbargs \
+ $repmemargs \
+ $m_logargs -log_max $log_max $envargs -errpfx MASTER \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+ $masterenv rep_limit 0 0
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $verbargs \
+ $repmemargs \
+ $c_logargs -log_max $log_max $envargs -errpfx CLIENT \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ $clientenv rep_limit 0 0
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ # Run rep_test in the master (and update client).
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+
+ # Find out what exists on the client before closing. We'll need
+ # to loop until the first master log file > last client log file.
+ set last_client_log [get_logfile $clientenv last]
+
+ puts "\tRep$tnum.b: Close client."
+ error_check_good client_close [$clientenv close] 0
+
+	# Now run the master ahead. We need to loop until the
+	# first master log file > last client log file.
+
+ set stop 0
+ while { $stop == 0 } {
+ # Run rep_test in the master (don't update client).
+ puts "\tRep$tnum.c: Running rep_test in replicated env."
+ eval rep_test \
+ $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ replclear 2
+
+ puts "\tRep$tnum.d: Run db_archive on master."
+ if { $m_logtype != "in-memory" } {
+ set res \
+ [eval exec $util_path/db_archive -d -h $masterdir]
+ }
+ # Make sure we have a gap between the last client log and
+ # the first master log. This is easy with on-disk logs, since
+ # we archive, but will take longer with in-memory logging.
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > $last_client_log } {
+ set stop 1
+ }
+ }
+
+ puts "\tRep$tnum.e: Reopen client."
+ env_cleanup $clientdir
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ $clientenv rep_limit 0 0
+ set envlist "{$masterenv 1} {$clientenv 2}"
+
+ # Turn on nowait.
+ $clientenv rep_config {nowait on}
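+	#
+	# With nowait on, calls that would otherwise block while the
+	# client is locked out for recovery should instead fail right
+	# away, presumably returning DB_REP_LOCKOUT -- that is the
+	# error text the checks below look for.
+	#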
+
+ # Process messages a few times, just enough to get client
+ # into lockout/recovery mode, but not enough to complete recovery.
+ set iter 3
+ for { set i 0 } { $i < $iter } { incr i } {
+ set nproced [proc_msgs_once $envlist NONE err]
+ }
+
+ puts "\tRep$tnum.f: Verify we are locked out of txn API calls."
+ if { [catch { set txn [$clientenv txn] } res] } {
+ error_check_good txn_lockout [is_substr $res "DB_REP_LOCKOUT"] 1
+ } else {
+ error "FAIL:[timestamp] Not locked out of txn API calls."
+ }
+
+ puts "\tRep$tnum.g: Verify we are locked out of env API calls."
+ if { [catch { set stat [$clientenv lock_stat] } res] } {
+ error_check_good env_lockout [is_substr $res "DB_REP_LOCKOUT"] 1
+ } else {
+ error "FAIL:[timestamp] Not locked out of env API calls."
+ }
+
+ # Now catch up and make sure we're not locked out anymore.
+ process_msgs $envlist
+
+ puts "\tRep$tnum.h: No longer locked out of txn API calls."
+ if { [catch { set txn [$clientenv txn] } res] } {
+ puts "FAIL: unable to start txn: $res"
+ } else {
+ error_check_good txn_no_lockout [$txn commit] 0
+ }
+
+ puts "\tRep$tnum.i: No longer locked out of env API calls."
+ if { [catch { set stat [$clientenv rep_stat] } res] } {
+ puts "FAIL: unable to make env call: $res"
+ }
+
+ puts "\tRep$tnum.h: Verify logs and databases"
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep053.tcl b/db-4.8.30/test/rep053.tcl
new file mode 100644
index 0000000..c5f5cba
--- /dev/null
+++ b/db-4.8.30/test/rep053.tcl
@@ -0,0 +1,227 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep053
+# TEST Replication and basic client-to-client synchronization.
+# TEST
+# TEST Open and start up master and 1 client.
+# TEST Start up a second client later and verify it sync'ed from
+# TEST the original client, not the master.
+#
+proc rep053 { method { niter 200 } { tnum "053" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 3]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+	# Run the body of the test with and without recovery,
+	# and with and without throttling. Skip recovery with
+	# in-memory logging - it doesn't make sense.
+ set throttle { "throttle" "" }
+ foreach r $test_recopts {
+ foreach t $throttle {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r $t): Replication\
+ and client-to-client sync up $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ puts "Rep$tnum: Client2 logs are [lindex $l 2]"
+ rep053_sub $method $niter $tnum $l $r $t $args
+ }
+ }
+ }
+}
+
+proc rep053_sub { method niter tnum logset recargs throttle largs } {
+ global anywhere
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set orig_tdir $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set delaycldir1 $testdir/DELAYCLDIR.1
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $delaycldir1
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set c2_logtype [lindex $logset 2]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ $m_logargs -errpfx MASTER $verbargs $repmemargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open two clients
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $c_logargs -errpfx CLIENT $verbargs $repmemargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # If throttling is specified, turn it on here. Throttle the
+ # client, since this is a test of client-to-client sync-up.
+ if { $throttle == "throttle" } {
+ error_check_good \
+ throttle [$clientenv rep_limit 0 [expr 8 * 1024]] 0
+ }
+
+ #
+ # Set up delayed client command, but don't eval until later.
+ # !!! Do NOT put the 'repladd' call here because we don't
+ # want this client to already have the backlog of records
+ # when it starts.
+ #
+ set dc1_envcmd "berkdb_env_noerr -create $c2_txnargs \
+ $c2_logargs -errpfx DELAYCL $verbargs $repmemargs \
+ -home $delaycldir1 -rep_transport \[list 3 replsend\]"
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.a: Run rep_test in master env."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Start new client."
+ set anywhere 1
+ repladd 3
+ set newclient [eval $dc1_envcmd $recargs -rep_client]
+ error_check_good client2_env [is_valid_env $newclient] TRUE
+
+ set envlist "{$masterenv 1} {$clientenv 2} {$newclient 3}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.c: Verify sync-up from client."
+ set req [stat_field $clientenv rep_stat "Client service requests"]
+ set miss [stat_field $clientenv rep_stat "Client service req misses"]
+ set rereq [stat_field $newclient rep_stat "Client rerequests"]
+
+ # To complete the internal init, we need a PAGE_REQ and a LOG_REQ. These
+ # requests get served by $clientenv. Since the end-of-range specified
+ # in the LOG_REQ points to the very end of the log (i.e., the LSN given
+ # in the NEWMASTER message), the serving client gets NOTFOUND in its log
+ # cursor reading loop, and can't tell whether it simply hit the end, or
+ # is really missing sufficient log records to fulfill the request. So
+ # it counts a "miss" and generates a rerequest. When internal init
+ # finishes recovery, it sends an ALL_REQ, for a total of 3 requests in
+ # the simple case, and more than 3 in the "throttle" case.
+ #
+
+ set expected_msgs 3
+ if { [is_queue $method] } {
+		# Queue databases require an extra request
+ # to retrieve the meta page.
+ incr expected_msgs
+ }
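+	#
+	# In numbers, per the comment above: the simple case is
+	# PAGE_REQ + LOG_REQ + ALL_REQ = 3 requests, and queue adds
+	# one more request for the meta page, for a total of 4.
+	#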
+
+ if { $throttle == "throttle" } {
+ error_check_good req [expr $req > $expected_msgs] 1
+ } else {
+ error_check_good min_req [expr $req >= $expected_msgs] 1
+ set max_expected_msgs [expr $expected_msgs * 2]
+ error_check_good max_req [expr $req <= $max_expected_msgs] 1
+ }
+ error_check_good miss=rereq $miss $rereq
+
+ # Check for throttling.
+ if { $throttle == "throttle" } {
+ set num_throttles \
+ [stat_field $clientenv rep_stat "Transmission limited"]
+ error_check_bad client_throttling $num_throttles 0
+ }
+
+ rep_verify $masterdir $masterenv $clientdir $clientenv 0 1 1
+
+ # Process messages again in case we are running with debug_rop.
+ process_msgs $envlist
+ rep_verify $masterdir $masterenv $delaycldir1 $newclient 0 1 1
+
+ puts "\tRep$tnum.d: Run rep_test more in master env and verify."
+ set niter 10
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+ rep_verify $masterdir $masterenv $clientdir $clientenv 0 1 1
+ process_msgs $envlist
+ rep_verify $masterdir $masterenv $delaycldir1 $newclient 0 1 1
+
+ puts "\tRep$tnum.e: Closing"
+ error_check_good master_close [$masterenv close] 0
+ error_check_good client_close [$clientenv close] 0
+ error_check_good dc1_close [$newclient close] 0
+ replclose $testdir/MSGQUEUEDIR
+ set testdir $orig_tdir
+ set anywhere 0
+ return
+}
diff --git a/db-4.8.30/test/rep054.tcl b/db-4.8.30/test/rep054.tcl
new file mode 100644
index 0000000..7f5a8a7
--- /dev/null
+++ b/db-4.8.30/test/rep054.tcl
@@ -0,0 +1,275 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep054
+# TEST Test of internal initialization where a far-behind
+# TEST client takes over as master.
+# TEST
+# TEST One master, two clients.
+# TEST Run rep_test and process.
+# TEST Close client 1.
+# TEST Run rep_test, opening new databases, and processing
+# TEST messages. Archive as we go so that log files get removed.
+# TEST Close master and reopen client 1 as master. Process messages.
+# TEST Verify that new master and client are in sync.
+# TEST Run rep_test again, adding data to one of the new
+# TEST named databases.
+
+proc rep054 { method { nentries 200 } { tnum "054" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ # Skip this test for named in-memory databases; it tries
+ # to close and re-open envs, which just won't work.
+ if { $databases_in_memory } {
+ puts "Skipping Rep$tnum for in-memory databases."
+ return
+ }
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 3]
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+	# Run the body of the test with and without recovery.
+	# Skip recovery with in-memory logging - it doesn't
+	# make sense.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r $args): Internal\
+ initialization test: far-behind client\
+ becomes master $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ puts "Rep$tnum: Client2 logs are [lindex $l 2]"
+
+ rep054_sub $method $nentries $tnum $l $r $args
+ }
+ }
+}
+
+proc rep054_sub { method nentries tnum logset recargs largs } {
+ global testdir
+ global util_path
+ global errorInfo
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ set omethod [convert_method $method]
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set c2_logtype [lindex $logset 2]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ $m_logargs -log_max $log_max $verbargs $repmemargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $c_logargs -log_max $log_max $verbargs $repmemargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Open 2nd client
+ repladd 3
+ set cl2_envcmd "berkdb_env_noerr -create $c2_txnargs \
+ $c2_logargs -log_max $log_max $verbargs $repmemargs \
+ -home $clientdir2 -rep_transport \[list 3 replsend\]"
+ set clientenv2 [eval $cl2_envcmd $recargs -rep_client]
+ error_check_good client2_env [is_valid_env $clientenv2] TRUE
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2} {$clientenv2 3}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ # Run rep_test in the master and in each client.
+ puts "\tRep$tnum.a: Running rep_test in master & clients."
+ set start 0
+ eval rep_test $method $masterenv NULL $nentries $start $start 0 $largs
+ incr start $nentries
+ process_msgs $envlist
+
+ # Master is in sync with both clients.
+ rep_verify $masterdir $masterenv $clientdir $clientenv
+
+ # Process messages again in case we are running with debug_rop.
+ process_msgs $envlist
+ rep_verify $masterdir $masterenv $clientdir2 $clientenv2
+
+ # Identify last log on client, then close. Loop until the first
+ # master log file is greater than the last client log file.
+ set last_client_log [get_logfile $clientenv last]
+
+ puts "\tRep$tnum.b: Close client 1."
+ error_check_good client_close [$clientenv close] 0
+ set envlist "{$masterenv 1} {$clientenv2 3}"
+
+ set stop 0
+ while { $stop == 0 } {
+ # Run rep_test in the master (don't update client).
+ puts "\tRep$tnum.c: Running rep_test in replicated env."
+ eval rep_test $method $masterenv NULL $nentries \
+ $start $start 0 $largs
+ incr start $nentries
+ replclear 2
+
+ puts "\tRep$tnum.d: Run db_archive on master."
+ if { $m_logtype != "in-memory" } {
+ set res [eval exec $util_path/db_archive -d -h $masterdir]
+ }
+ # Make sure we have a gap between the last client log and
+ # the first master log.
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > $last_client_log } {
+ set stop 1
+ }
+ }
+
+ # Create a database that does not even exist on client 1.
+ set newfile "newtest.db"
+ set newdb [eval {berkdb_open_noerr -env $masterenv -create \
+ -auto_commit -mode 0644} $largs $omethod $newfile]
+ error_check_good newdb_open [is_valid_db $newdb] TRUE
+ eval rep_test $method $masterenv $newdb $nentries $start $start 0 $largs
+ set start [expr $start + $nentries]
+ process_msgs $envlist
+
+ # Identify last master log file.
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ set last_master_log [get_logfile $masterenv last]
+ set stop 0
+
+	# Send the master and client2 far ahead of client 1. Archive
+	# as we go so that there is a gap between the log files of the
+	# closed client and those of the active master and client, and
+	# so that the creation of the new database is archived away.
+ puts "\tRep$tnum.e: Running rep_test in master & remaining client."
+ while { $stop == 0 } {
+
+ eval rep_test \
+ $method $masterenv NULL $nentries $start $start 0 $largs
+ incr start $nentries
+
+ process_msgs $envlist
+
+ puts "\tRep$tnum.f: Send master ahead of closed client."
+ if { $m_logtype != "in-memory" } {
+ set res [eval exec $util_path/db_archive -d -h $masterdir]
+ }
+ if { $c2_logtype != "in-memory" } {
+ set res [eval exec $util_path/db_archive -d -h $clientdir2]
+ }
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > $last_master_log } {
+ set stop 1
+ }
+ }
+ process_msgs $envlist
+
+ # Master is in sync with client 2.
+ rep_verify $masterdir $masterenv $clientdir2 $clientenv2 1
+
+ # Close master.
+ puts "\tRep$tnum.g: Close master."
+ error_check_good newdb_close [$newdb close] 0
+ error_check_good close_master [$masterenv close] 0
+
+ # The new database is still there.
+ error_check_good newfile_exists [file exists $masterdir/$newfile] 1
+
+ puts "\tRep$tnum.h: Reopen client1 as master."
+ replclear 2
+ set newmasterenv [eval $cl_envcmd $recargs -rep_master]
+ error_check_good newmasterenv [is_valid_env $newmasterenv] TRUE
+
+ # Force something into the log
+ $newmasterenv txn_checkpoint -force
+
+ puts "\tRep$tnum.i: Reopen master as client."
+ set oldmasterenv [eval $ma_envcmd $recargs -rep_client]
+ error_check_good oldmasterenv [is_valid_env $oldmasterenv] TRUE
+ set envlist "{$oldmasterenv 1} {$newmasterenv 2} {$clientenv2 3}"
+ process_msgs $envlist
+
+ rep_verify $clientdir $newmasterenv $masterdir $oldmasterenv 1
+
+ error_check_good newmasterenv_close [$newmasterenv close] 0
+ error_check_good oldmasterenv_close [$oldmasterenv close] 0
+ error_check_good clientenv2_close [$clientenv2 close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep055.tcl b/db-4.8.30/test/rep055.tcl
new file mode 100644
index 0000000..910e704
--- /dev/null
+++ b/db-4.8.30/test/rep055.tcl
@@ -0,0 +1,242 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep055
+# TEST Test of internal initialization and log archiving.
+# TEST
+# TEST One master, one client.
+# TEST Generate several log files.
+# TEST Remove old master log files and generate several more.
+# TEST Get list of archivable files from db_archive and restart client.
+# TEST As client is in the middle of internal init, remove
+# TEST the log files returned earlier by db_archive.
+#
+proc rep055 { method { niter 200 } { tnum "055" } args } {
+
+ source ./include.tcl
+ global mixed_mode_logging
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ # This test is all about log archive issues, so don't run with
+ # in-memory logging.
+ if { $mixed_mode_logging > 0 } {
+ puts "Rep$tnum: Skipping for mixed-mode logging."
+ return
+ }
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery,
+ # and with and without cleaning.
+ set opts { clean noclean }
+ foreach r $test_recopts {
+ foreach c $opts {
+ puts "Rep$tnum ($method $r $c $args):\
+ Test of internal initialization $msg $msg2."
+ rep055_sub $method $niter $tnum $r $c $args
+
+ }
+ }
+}
+
+proc rep055_sub { method niter tnum recargs opts largs } {
+ global testdir
+ global passwd
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_buf [expr $pagesize * 2]
+ set log_max [expr $log_buf * 4]
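+	# Worked out: with the 4096-byte pagesize, log_buf is 8192
+	# bytes (two pages) and log_max is 32768 bytes, i.e. four log
+	# buffers per log file -- the documented minimum ratio noted
+	# elsewhere in these tests.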
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create -txn nosync $verbargs \
+ $repmemargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx MASTER \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+ $masterenv rep_limit 0 0
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create -txn nosync $verbargs \
+ $repmemargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx CLIENT \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ $clientenv rep_limit 0 0
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ # Run rep_test in the master (and update client).
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Close client."
+ error_check_good client_close [$clientenv close] 0
+
+ # Find out what exists on the client. We need to loop until
+ # the first master log file > last client log file.
+ # This forces internal init to happen.
+
+ set res [eval exec $util_path/db_archive -l -h $clientdir]
+ set last_client_log [lindex [lsort $res] end]
+ set stop 0
+ while { $stop == 0 } {
+ # Run rep_test in the master (don't update client).
+ puts "\tRep$tnum.c: Running rep_test in replicated env."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ replclear 2
+
+ puts "\tRep$tnum.d: Run db_archive on master."
+ set res [eval exec $util_path/db_archive -d -h $masterdir]
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ if { [lsearch -exact $res $last_client_log] == -1 } {
+ set stop 1
+ }
+ }
+
+ # Find out what exists on the master. We need to loop until
+ # the master log changes. This is required so that we can
+ # have a log_archive waiting to happen.
+ #
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ set last_master_log [lindex [lsort $res] end]
+ set stop 0
+ puts "\tRep$tnum.e: Move master logs forward again."
+ while { $stop == 0 } {
+ # Run rep_test in the master (don't update client).
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ replclear 2
+
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ set last_log [lindex [lsort $res] end]
+ if { $last_log != $last_master_log } {
+ set stop 1
+ }
+ }
+
+ puts "\tRep$tnum.f: Get list of files for removal."
+ set logs [eval exec $util_path/db_archive -h $masterdir]
+
+ puts "\tRep$tnum.g: Reopen client ($opts)."
+ if { $opts == "clean" } {
+ env_cleanup $clientdir
+ }
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ $clientenv rep_limit 0 0
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ #
+ # Process messages once to get partially through internal init.
+ #
+ proc_msgs_once $envlist NONE err
+
+ if { $opts != "clean" } {
+ puts "\tRep$tnum.g.1: Trigger log request"
+ #
+		# When we don't clean, starting the client doesn't
+		# trigger any events. We need to generate some log
+		# records so that the client requests the missing
+		# logs; that request is what triggers internal init.
+ #
+ set entries 10
+ eval rep_test $method $masterenv NULL $entries $niter 0 0 $largs
+ #
+ # Process messages three times to get us into internal init
+ # but not enough to get us all the way through it.
+ #
+ proc_msgs_once $envlist NONE err
+ proc_msgs_once $envlist NONE err
+ proc_msgs_once $envlist NONE err
+ }
+
+ #
+ # Now in the middle of internal init, remove the log files
+ # db_archive reported earlier.
+ #
+ foreach l $logs {
+ fileremove -f $masterdir/$l
+ }
+ #
+ # Now finish processing all the messages.
+ #
+ process_msgs $envlist 0 NONE err
+
+ puts "\tRep$tnum.h: Verify logs and databases"
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep058.tcl b/db-4.8.30/test/rep058.tcl
new file mode 100644
index 0000000..2d8cce4
--- /dev/null
+++ b/db-4.8.30/test/rep058.tcl
@@ -0,0 +1,149 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep058
+# TEST
+# TEST Replication with early databases
+# TEST
+# TEST	Mimic an application that creates a database before
+# TEST	calling rep_start, thus writing log records on a client
+# TEST	before it is a client. Verify it cannot join the repl group.
+
+proc rep058 { method { tnum "058" } args } {
+
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+	# There should be no difference between methods. Just use btree.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep058: Skipping for method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping test with -recover for\
+ in-memory logs."
+ continue
+ }
+
+ puts "Rep$tnum ($method $r): Replication with \
+ pre-created databases $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep058_sub $method $tnum $l $r $args
+ }
+ }
+}
+
+proc rep058_sub { method tnum logset recargs largs } {
+ source ./include.tcl
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set orig_tdir $testdir
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ env_cleanup $testdir
+ replsetup $testdir/MSGQUEUEDIR
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync. Adjust the args for master
+ # and client.
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ set omethod [convert_method $method]
+
+ # Open a master.
+ repladd 1
+ set envcmd(M) "berkdb_env_noerr -create $m_txnargs \
+ $m_logargs -lock_detect default $verbargs $repmemargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set menv [eval $envcmd(M) $recargs]
+
+ # Open a client
+ repladd 2
+ set envcmd(C) "berkdb_env_noerr -create $c_txnargs \
+ $c_logargs -lock_detect default $verbargs $repmemargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set cenv [eval $envcmd(C) $recargs]
+ error_check_good client_env [is_valid_env $cenv] TRUE
+
+ puts "\tRep$tnum.a: Create same database in both envs."
+ set dbname "test.db"
+ set mdb [eval {berkdb_open_noerr -env $menv -create \
+ -auto_commit -mode 0644} -btree $dbname]
+ error_check_good open [is_valid_db $mdb] TRUE
+ set cdb [eval {berkdb_open_noerr -env $cenv -create \
+ -auto_commit -mode 0644} -btree $dbname]
+ error_check_good open [is_valid_db $cdb] TRUE
+
+ puts "\tRep$tnum.b: Start master and client now."
+ error_check_good master [$menv rep_start -master] 0
+ error_check_good client [$cenv rep_start -client] 0
+ #
+ # We'll only catch this error if we turn on no-autoinit.
+ # Otherwise, the system will throw away everything on the
+ # client and resync.
+ #
+ $cenv rep_config {noautoinit on}
+
+ set envlist "{$menv 1} {$cenv 2}"
+ process_msgs $envlist 0 NONE err
+ error_check_good msg_err [is_substr $err "REP_JOIN_FAILURE"] 1
+
+ puts "\tRep$tnum.c: Clean up."
+ error_check_good cdb_close [$cdb close] 0
+	error_check_good mdb_close [$mdb close] 0
+
+ error_check_good menv_close [$menv close] 0
+ error_check_good cenv_close [$cenv close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+ set testdir $orig_tdir
+ return
+}
+
diff --git a/db-4.8.30/test/rep060.tcl b/db-4.8.30/test/rep060.tcl
new file mode 100644
index 0000000..a78c7cd
--- /dev/null
+++ b/db-4.8.30/test/rep060.tcl
@@ -0,0 +1,346 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep060
+# TEST Test of normally running clients and internal initialization.
+# TEST Have a client running normally, but slow/far behind the master.
+# TEST Then the master checkpoints and archives, causing the client
+# TEST	to suddenly be thrown into internal init. This test verifies
+# TEST	that we clean up the old files/pages in mpool and dbreg.
+# TEST	Also test the same thing with the app holding an open dbp.
+#
+proc rep060 { method { niter 200 } { tnum "060" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ # Run for btree and queue only.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_btree $method] == 1 || \
+ [is_queue $method] == 1 } {
+ lappend test_methods $method
+ }
+ }
+ return $test_methods
+ }
+ if { [is_btree $method] != 1 && [is_queue $method] != 1 } {
+ puts "Skipping rep060 for method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+	# Run the body of the test with and without recovery, and
+	# with and without an app-level handle open on the database.
+	# Skip recovery with in-memory logging - it doesn't make sense.
+ #
+ # 'user' means that the "app" (the test in this case) has
+ # its own handle open to the database.
+ set opts { "" user }
+ foreach r $test_recopts {
+ foreach o $opts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r $o $args):\
+ Test of internal initialization and\
+ slow client $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep060_sub $method $niter $tnum $l $r $o $args
+ }
+ }
+ }
+}
+
+proc rep060_sub { method niter tnum logset recargs opt largs } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 4]
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ $repmemargs \
+ $m_logargs -log_max $log_max -errpfx MASTER $verbargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client
+ puts "\tRep$tnum.a: Open client."
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $repmemargs \
+ $c_logargs -log_max $log_max -errpfx CLIENT $verbargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init.
+ #
+ $masterenv test force noarchive_timeout
+
+	# Set a low limit so that there are lots of rep messages
+	# between master and client. This allows greater control
+	# over the test.
+ error_check_good thr [$masterenv rep_limit 0 [expr 10 * 1024]] 0
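+	# The arguments to rep_limit are {gbytes bytes}, so 0 and
+	# 10 * 1024 caps at 10KB the data the master sends in response
+	# to any single message, which is presumably what keeps the
+	# client trailing well behind during the test.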
+
+ # It is *key* to this test that we have a database handle
+ # open for the duration of the test. The problem this
+ # test checks for regards internal init when there are open
+ # database handles around.
+ #
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+
+ set omethod [convert_method $method]
+ set db [eval {berkdb_open_noerr -env $masterenv -auto_commit \
+ -create -mode 0644} $largs $omethod $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Put some data into the database, running the master up past
+ # log file 10, discarding messages to the client so that it will
+ # be forced to request them as a gap.
+ #
+ puts "\tRep$tnum.c: Run rep_test in master env."
+ set start 0
+
+ set stop 0
+ set endlog 10
+ while { $stop == 0 } {
+ # Run test in the master (don't update client).
+ eval rep_test $method \
+ $masterenv $db $niter $start $start 0 $largs
+ incr start $niter
+ replclear 2
+
+ if { $m_logtype != "in-memory" } {
+ set res \
+ [eval exec $util_path/db_archive -l -h $masterdir]
+ }
+ # Make sure the master has gone as far as we requested.
+ set last_master_log [get_logfile $masterenv last]
+ if { $last_master_log > $endlog } {
+ set stop 1
+ }
+ }
+
+ # Do one more set of txns at the master, replicating log records
+ # normally, to give the client a chance to notice how many messages
+ # it is missing.
+ #
+ eval rep_test $method $masterenv $db $niter $start $start 0 $largs
+ incr start $niter
+
+ set stop 0
+ set client_endlog 5
+ set last_client_log 0
+ set nproced 0
+ incr nproced [proc_msgs_once $envlist NONE err]
+ incr nproced [proc_msgs_once $envlist NONE err]
+
+ puts "\tRep$tnum.d: Client catches up partway."
+ error_check_good ckp [$masterenv txn_checkpoint] 0
+
+ # We have checkpointed on the master, but we want to get the
+ # client a healthy way through the logs before archiving on
+ # the master.
+ while { $stop == 0 } {
+ set nproced 0
+ incr nproced [proc_msgs_once $envlist NONE err]
+ if { $nproced == 0 } {
+ error_check_good \
+ ckp [$masterenv txn_checkpoint -force] 0
+ }
+
+ # Stop processing when the client is partway through.
+ if { $c_logtype != "in-memory" } {
+ set res \
+ [eval exec $util_path/db_archive -l -h $clientdir]
+ }
+ set last_client_log [get_logfile $clientenv last]
+ set first_client_log [get_logfile $clientenv first]
+ if { $last_client_log > $client_endlog } {
+ set stop 1
+ }
+ }
+
+ #
+	# The user (the app) may have its own handle open on the database.
+ #
+ if { $opt == "user" } {
+ set cdb [eval {berkdb_open_noerr -env} $clientenv $dbname]
+ error_check_good dbopen [is_valid_db $cdb] TRUE
+ set ccur [$cdb cursor]
+ error_check_good curs [is_valid_cursor $ccur $cdb] TRUE
+ set ret [$ccur get -first]
+ set kd [lindex $ret 0]
+ set key [lindex $kd 0]
+ error_check_good cclose [$ccur close] 0
+ } else {
+ set cdb NULL
+ }
+
+	# Now that the client is well into normal processing, just
+	# fairly far behind the master, archive on the master,
+	# removing the log files the client needs and sending it into
+	# internal init with the database pages reflecting the client's
+	# current LSN.
+ #
+ puts "\tRep$tnum.e: Force internal initialization."
+ if { $m_logtype != "in-memory" } {
+ puts "\tRep$tnum.e1: Archive on master."
+ set res [eval exec $util_path/db_archive -d -h $masterdir]
+ } else {
+ # Master is in-memory, and we'll need a different
+ # technique to create the gap forcing internal init.
+ puts "\tRep$tnum.e1: Run rep_test until gap is created."
+ set stop 0
+ while { $stop == 0 } {
+ eval rep_test $method $masterenv \
+ NULL $niter $start $start 0 $largs
+ incr start $niter
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > $last_client_log } {
+ set stop 1
+ }
+ }
+ }
+
+ puts "\tRep$tnum.f: Process messages."
+ if { $opt == "user" } {
+ for { set loop 0 } { $loop < 5 } { incr loop } {
+ set nproced 0
+ incr nproced [proc_msgs_once $envlist]
+ if { $cdb == "NULL" } {
+ continue
+ }
+ puts "\tRep$tnum.g.$loop: Check user database."
+ set status [catch {$cdb get $key} ret]
+ if { $status != 0 } {
+ #
+ # For db operations, DB doesn't block, but
+ # returns DEADLOCK.
+ #
+ set is_lock [is_substr $ret DB_LOCK_DEADLOCK]
+ set is_dead [is_substr $ret DB_REP_HANDLE_DEAD]
+ error_check_good lock_dead \
+ [expr $is_lock || $is_dead] 1
+ if { $is_dead } {
+ error_check_good cclose [$cdb close] 0
+ set cdb NULL
+ }
+ }
+ }
+ }
+ process_msgs $envlist
+
+ #
+ # If we get through the user loop with a valid db, then it better
+ # be a dead handle after we've completed processing all the
+ # messages and running recovery.
+ #
+ if { $cdb != "NULL" } {
+ puts "\tRep$tnum.h: Check dead handle."
+ set status [catch {$cdb get $key} ret]
+ error_check_good status $status 1
+ error_check_good is_dead [is_substr $ret DB_REP_HANDLE_DEAD] 1
+ error_check_good cclose [$cdb close] 0
+ puts "\tRep$tnum.i: Verify correct internal initialization."
+ } else {
+ puts "\tRep$tnum.h: Verify correct internal initialization."
+ }
+ error_check_good close [$db close] 0
+ process_msgs $envlist
+
+ # We have now forced an internal initialization. Verify it is correct.
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ # Check that logs are in-memory or on-disk as expected.
+ check_log_location $masterenv
+ check_log_location $clientenv
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep061.tcl b/db-4.8.30/test/rep061.tcl
new file mode 100644
index 0000000..275167a
--- /dev/null
+++ b/db-4.8.30/test/rep061.tcl
@@ -0,0 +1,443 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep061
+# TEST	Test of internal initialization with multiple files and
+# TEST	pagesizes, with page gaps.
+# TEST
+# TEST One master, one client.
+# TEST Generate several log files.
+# TEST Remove old master log files.
+# TEST Delete client files and restart client.
+# TEST Put one more record to the master.
+# TEST Force some page messages to get dropped.
+#
+proc rep061 { method { niter 500 } { tnum "061" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for btree and queue only.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_btree $method] == 1 || [is_queue $method] == 1 } {
+ lappend test_methods $method
+ }
+ }
+ return $test_methods
+ }
+ if { [is_btree $method] != 1 && [is_queue $method] != 1 } {
+ puts "Skipping rep061 for method $method."
+ return
+ }
+ set args [convert_args $method $args]
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+	# Run the body of the test with and without recovery, and
+	# with cleaning, no cleaning, and bulk transfer.
+ set opts { noclean clean bulk }
+ # Try varying drop percentages.
+ set dpct { 10 5 }
+ foreach r $test_recopts {
+ foreach c $opts {
+ foreach l $logsets {
+ foreach d $dpct {
+ set logindex [lsearch -exact $l \
+ "in-memory"]
+ if { $r == "-recover" && \
+ $logindex != -1 } {
+ puts "Skipping rep$tnum \
+ for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r $c):\
+ Internal initialization - with\
+ $d pct page gaps, $msg $msg2."
+ puts "Rep$tnum: Master logs are \
+ [lindex $l 0]"
+ puts "Rep$tnum: Client logs are \
+ [lindex $l 1]"
+ rep061_sub $method $niter $tnum \
+ $l $r $c $d $args
+ }
+ }
+ }
+ }
+}
+
+proc rep061_sub { method niter tnum logset recargs opts dpct largs } {
+ global testdir
+ global util_path
+ global drop drop_msg
+ global startup_done
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set maxpg 16384
+ set log_max [expr $maxpg * 8]
+ set cache [expr $maxpg * 32]
+ if { $repfiles_in_memory } {
+ set cache [expr ($maxpg * 32) + (3000 * 1024)]
+ }
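+	#
+	# For reference: the -cachesize option below takes a list of
+	# {gbytes bytes ncache}, so {0 $cache 1} is 0GB plus $cache
+	# bytes (16384 * 32 = 512KB, or roughly 3.4MB when replication
+	# files are kept in memory) in a single cache region.
+	#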
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $verbargs \
+ $repmemargs \
+ -log_max $log_max -cachesize { 0 $cache 1 } -errpfx MASTER \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $verbargs \
+ $repmemargs \
+ -log_max $log_max -cachesize { 0 $cache 1 } -errpfx CLIENT \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ #
+ # Since we're dropping messages, set the rerequest values
+ # lower so we don't wait too long to request what we're
+ # missing.
+ #
+ #
+	# Set to 200/800 usecs. An average ping to localhost should
+	# be a few tens of usecs.
+ #
+ $clientenv rep_request 200 800
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ #
+ # Note that by setting these 2 globals below, message dropping
+ # is automatically enabled. By setting 'drop' to 0, further
+ # down in the test, we disable message dropping.
+ #
+ set drop 1
+ set drop_msg [expr 100 / $dpct]
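+	# Worked out for the drop percentages used by this test: dpct
+	# of 10 gives drop_msg 10 and dpct of 5 gives drop_msg 20,
+	# i.e. presumably one of every drop_msg messages is dropped
+	# (roughly 10% and 5% respectively).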
+
+ # Run rep_test in the master (and update client).
+ set startpgsz 512
+ set pglist ""
+ for { set pgsz $startpgsz } { $pgsz <= $maxpg } \
+ { set pgsz [expr $pgsz * 2] } {
+ lappend pglist $pgsz
+ }
+ set nfiles [llength $pglist]
+ puts "\tRep$tnum.a.0: Running rep_test $nfiles times in replicated env."
+ set dbopen ""
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set mult [expr $i * 10]
+ set nentries [expr $niter + $mult]
+ set pagesize [lindex $pglist $i]
+ set largs " -pagesize $pagesize "
+ eval rep_test $method $masterenv NULL $nentries $mult $mult \
+ 0 $largs
+ process_msgs $envlist
+
+ #
+		# Every time we run 'rep_test' we create 'test.db', so
+		# rename it each time through the loop.
+ #
+ set old "test.db"
+ set new "test.$i.db"
+
+ if { $databases_in_memory } {
+ error_check_good rename [$masterenv dbrename \
+ -auto_commit "" $old $new] 0
+ } else {
+ error_check_good rename [$masterenv dbrename \
+ -auto_commit $old $new] 0
+ }
+ process_msgs $envlist
+
+ #
+ # We want to keep some databases open so that we test the
+		# code finding the files in the data dir as well as finding
+		# them in the dbreg list.
+ #
+ if { [expr $i % 2 ] == 0 } {
+ if { $databases_in_memory } {
+ set db [berkdb_open_noerr -env $masterenv "" $new]
+ } else {
+ set db [berkdb_open_noerr -env $masterenv $new]
+ }
+ error_check_good dbopen.$i [is_valid_db $db] TRUE
+ lappend dbopen $db
+ }
+ }
+ #
+ # Set up a few special databases too. We want one with a subdatabase
+ # and we want an empty database.
+ #
+ if { $databases_in_memory } {
+ set testfile { "" "test.db" }
+ set emptyfile { "" "empty.db" }
+ } else {
+ set testfile "test.db"
+ set emptyfile "empty.db"
+ }
+
+ if { [is_queue $method] } {
+ set sub ""
+ } else {
+ set sub "subdb"
+ }
+ set omethod [convert_method $method]
+ set largs " -pagesize $maxpg "
+ set largs [convert_args $method $largs]
+
+ #
+ # Create/close an empty database.
+ #
+ set db [eval {berkdb_open_noerr -env $masterenv -auto_commit -create \
+ -mode 0644} $largs $omethod $emptyfile]
+ error_check_good emptydb [is_valid_db $db] TRUE
+ error_check_good empty_close [$db close] 0
+
+ # If we're not using in-mem named databases, open a subdb and keep
+ # it open. (Do a regular db if method is queue.)
+ # We need it a few times later on.
+ #
+ if { $databases_in_memory } {
+ set db [eval {berkdb_open_noerr -env $masterenv -auto_commit\
+ -create -mode 0644} $largs $omethod $testfile]
+ } else {
+ set db [eval {berkdb_open_noerr -env $masterenv -auto_commit\
+ -create -mode 0644} $largs $omethod $testfile $sub]
+ }
+ error_check_good subdb [is_valid_db $db] TRUE
+ eval rep_test $method $masterenv $db $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Close client."
+ error_check_good client_close [$clientenv close] 0
+
+ #
+ # Run rep_test in the master (don't update client).
+ # Need to guarantee that we will change log files during
+ # this run so run with the largest pagesize and double
+ # the number of entries.
+ #
+ puts "\tRep$tnum.c: Running rep_test ( $largs) in replicated env."
+ set nentries [expr $niter * 2]
+ eval rep_test $method $masterenv $db $nentries 0 0 0 $largs
+ replclear 2
+
+ puts "\tRep$tnum.d: Run db_archive on master."
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ error_check_bad log.1.present [lsearch -exact $res log.0000000001] -1
+ set res [eval exec $util_path/db_archive -d -h $masterdir]
+ set res [eval exec $util_path/db_archive -l -h $masterdir]
+ error_check_good log.1.gone [lsearch -exact $res log.0000000001] -1
+
+ puts "\tRep$tnum.e: Reopen client ($opts)."
+ if { $opts == "clean" } {
+ env_cleanup $clientdir
+ }
+ if { $opts == "bulk" } {
+ error_check_good bulk [$masterenv rep_config {bulk on}] 0
+ }
+
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ #
+	# Since we are frequently dropping messages, set the
+	# rerequest times low to make sure the test finishes.
+ #
+ $clientenv rep_request 200 800
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist 0 NONE err
+ set done 0
+ #
+ # We are done with this loop when the client has achieved
+ # startup_done and we've looped one more time after turning
+ # off dropping messages. Otherwise we might get a few
+ # straggling log records that don't make it over.
+ #
+	# Set a maximum iteration count because some methods (queue, for
+	# example) can get into a state where, if we keep dropping
+	# messages, the client never catches up and we loop forever.
+ #
+ set iter 1
+ set max_drop_iter 200
+ if { $opts == "bulk" } {
+ #
+ # Since bulk is sending several messages at once we need to
+ # loop more times to allow rerequests to get through.
+ #
+ set max_drop_iter [expr $max_drop_iter * 2]
+ $clientenv rep_request 100 400
+ }
+ while { $done == 0 } {
+ puts "\tRep$tnum.e.1.$iter: Trigger log request"
+ #
+ # When we don't clean, starting the client doesn't
+ # trigger any events. We need to generate some log
+ # records so that the client requests the missing
+ # logs and that will trigger it.
+ #
+ set entries 4
+ eval rep_test $method $masterenv $db $entries $niter 0 0 $largs
+ process_msgs $envlist 0 NONE err
+ set stat [exec $util_path/db_stat -N -r -R A -h $clientdir]
+ #
+ # Loop until we are done with the RECOVER_PAGE phase.
+ #
+ set in_page [is_substr $stat "REP_F_RECOVER_PAGE"]
+ if { !$in_page || $iter >= $max_drop_iter } {
+ #
+ # If we're dropping, stop doing so.
+ # If we're not dropping, we're done.
+ #
+ if { $drop != 0 } {
+ set drop 0
+ } else {
+ set done 1
+ }
+ }
+ incr iter
+ }
+ error_check_good subdb_close [$db close] 0
+ #
+	# Stop dropping records now that we've sent all the pages.
+	# We need to do that to make sure all the log records reach the
+	# client so we can accurately compare.  Also, make sure enough
+	# time has passed so that the client's rep_request timer has
+	# expired, and make sure there are some messages to send to the
+	# client, so that there is something to trigger any needed final
+	# rerequest.
+ #
+ set drop 0
+ tclsleep 2
+ $masterenv txn_checkpoint -force
+ process_msgs $envlist 0 NONE err
+
+ puts "\tRep$tnum.f: Verify logs and databases"
+ #
+ # If doing bulk testing, turn it off now so that it forces us
+ # to flush anything currently in the bulk buffer. We need to
+ # do this because rep_test might have aborted a transaction on
+ # its last iteration and those log records would still be in
+ # the bulk buffer causing the log comparison to fail.
+ #
+ if { $opts == "bulk" } {
+ puts "\tRep$tnum.f.1: Turn off bulk transfers."
+ error_check_good bulk [$masterenv rep_config {bulk off}] 0
+ process_msgs $envlist 0 NONE err
+ }
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set dbname "test.$i.db"
+ rep_verify $masterdir $masterenv $clientdir $clientenv \
+ 1 1 0 $dbname
+ }
+
+ #
+	# Close the databases held open on the master for initialization.
+ #
+ foreach db $dbopen {
+ error_check_good db_close [$db close] 0
+ }
+
+ # Add records to the master and update client.
+ puts "\tRep$tnum.g: Add more records and check again."
+ set entries 10
+ set db [eval {berkdb_open_noerr -env $masterenv -auto_commit \
+ -mode 0644} $largs $omethod $testfile $sub]
+ error_check_good subdb [is_valid_db $db] TRUE
+ eval rep_test $method $masterenv $db $entries $niter 0 0 $largs
+ error_check_good subdb_close [$db close] 0
+ process_msgs $envlist 0 NONE err
+
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ rep_verify $masterdir $masterenv $clientdir $clientenv \
+ 1 1 0
+ }
+ set bulkxfer [stat_field $masterenv rep_stat "Bulk buffer transfers"]
+ if { $opts == "bulk" } {
+ error_check_bad bulkxferon $bulkxfer 0
+ } else {
+ error_check_good bulkxferoff $bulkxfer 0
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep062.tcl b/db-4.8.30/test/rep062.tcl
new file mode 100644
index 0000000..5ba3dab
--- /dev/null
+++ b/db-4.8.30/test/rep062.tcl
@@ -0,0 +1,321 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2006-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep062
+# TEST Test of internal initialization where client has a different
+# TEST kind of database than the master.
+# TEST
+# TEST Create a master of one type, and let the client catch up.
+# TEST Close the client.
+# TEST Remove the database on the master, and create a new
+# TEST database of the same name but a different type.
+# TEST Run the master ahead far enough that internal initialization
+# TEST will be required on the reopen of the client.
+# TEST Reopen the client and verify.
+
+proc rep062 { method {tnum "062"} args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # This test uses different access methods internally.
+ # Called from outside, accept only btree.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] != 1 } {
+ puts "Skipping rep$tnum for method $method."
+ return
+ }
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery,
+ # and with and without cleaning.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r):\
+ Internal initialization with change in\
+ access method of database $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep062_sub $method $tnum $l $r $args
+ }
+ }
+}
+
+proc rep062_sub { method tnum logset recargs largs } {
+ global testdir
+ global util_path
+ global passwd
+ global has_crypto
+ global encrypt
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set maxpg 16384
+ set log_max [expr $maxpg * 8]
+ set cache [expr $maxpg * 32]
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Set up pairs of databases to test. The first element is whether
+ # to open an encrypted env, the second is the original database
+ # method and flags, the third is the replacement database and flags.
+ set pairlist {
+ { 0 {btree ""} {hash ""} }
+ { 0 {queueext "-pagesize 2048"} {queue ""} }
+ { 0 {queueext ""} {btree ""} }
+ { 0 {queue ""} {recno ""} }
+ { 0 {hash ""} {queue ""} }
+ { 0 {recno ""} {btree ""} }
+ { 0 {hash ""} {queueext "-pagesize 16384"} }
+ { 0 {queueext "-pagesize 2048"} {queueext "-pagesize 16384"} }
+ { 0 {queueext "-pagesize 16384"} {queueext "-pagesize 2048"} }
+ { 0 {queue ""} {queueext "-pagesize 16384"} }
+ { 1 {btree ""} {btree "-encrypt"} }
+ { 1 {btree "-encrypt"} {btree ""} }
+ { 1 {queue ""} {queue "-encrypt"} }
+ { 1 {queue "-encrypt"} {queue ""} }
+ }
+
+ foreach p $pairlist {
+ env_cleanup $testdir
+ # Extract values from the list.
+ set encryptenv [lindex [lindex $p 0] 0]
+ set encryptmsg "clear"
+ if { $has_crypto == 0 && $encryptenv == 1 } {
+ continue
+ }
+ if { $encryptenv == 1 } {
+ set encryptmsg "encrypted"
+ }
+ replsetup $testdir/MSGQUEUEDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set method1 [lindex [lindex $p 1] 0]
+ set method2 [lindex [lindex $p 2] 0]
+ if { $databases_in_memory } {
+ if { [is_queueext $method1] || [is_queueext $method2] } {
+ puts "Skipping this set for in-memory databases"
+ continue
+ }
+ }
+
+ set flags1 [lindex [lindex $p 1] 1]
+ set flags2 [lindex [lindex $p 2] 1]
+
+ puts "Rep$tnum: Testing with $encryptmsg env."
+ puts -nonewline "Rep$tnum: Replace [lindex $p 1] "
+ puts "database with [lindex $p 2] database."
+
+ # Set up flags for encryption if necessary.
+ set envflags ""
+ set enc ""
+ if { $encryptenv == 1 } {
+ set envflags "-encryptaes $passwd"
+ set enc " -P $passwd"
+ }
+
+ # Derive args for specified methods.
+ set args1 [convert_args $method1 ""]
+ set args2 [convert_args $method2 ""]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ $m_logargs -log_max $log_max $verbargs -errpfx MASTER \
+ -cachesize { 0 $cache 1 } $envflags $repmemargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client.
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $c_logargs -log_max $log_max $verbargs -errpfx CLIENT \
+ -cachesize { 0 $cache 1 } $envflags $repmemargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ # Open two databases on the master - one to test different
+ # methods, one to advance the log, forcing internal
+ # initialization.
+
+ puts "\tRep$tnum.a: Open test database (it will change methods)."
+ if { $databases_in_memory } {
+ set testfile { "" "test.db" }
+ set testfile2 { "" "test2.db" }
+ } else {
+ set testfile "test.db"
+ set testfile2 "test2.db"
+ }
+
+ set omethod [convert_method $method1]
+ set db1 [eval {berkdb_open} -env $masterenv -auto_commit \
+ -create $omethod $flags1 $args1 -mode 0644 $testfile]
+ error_check_good db1open [is_valid_db $db1] TRUE
+
+ puts "\tRep$tnum.b: Open log-advance database."
+ set db2 [eval {berkdb_open} -env $masterenv -auto_commit \
+ -create $omethod $args1 -mode 0644 $flags1 $testfile2]
+ error_check_good db2open [is_valid_db $db2] TRUE
+
+ puts "\tRep$tnum.c: Add a few records to test db."
+ set nentries 10
+ set start 0
+ eval rep_test $method1 \
+ $masterenv $db1 $nentries $start $start 0 $args1
+ incr start $nentries
+ process_msgs $envlist
+
+ puts "\tRep$tnum.d: Close client."
+
+ # First save the log number of the latest client log.
+ set last_client_log [get_logfile $clientenv last]
+ error_check_good client_close [$clientenv close] 0
+
+	# Close the database on the master and remove it.  Then create
+	# a new database of the same name but a different type.
+ puts "\tRep$tnum.e: Remove test database."
+ error_check_good db1_close [$db1 close] 0
+ error_check_good db1_remove [eval {$masterenv dbremove} $testfile] 0
+
+ puts "\tRep$tnum.f: \
+ Create new test database; same name, different method."
+ set omethod [convert_method $method2]
+ set db1 [eval {berkdb_open} -env $masterenv -auto_commit \
+ -create $omethod $flags2 $args2 -mode 0644 $testfile]
+ error_check_good db1open [is_valid_db $db1] TRUE
+
+ # Run rep_test in the master enough to require internal
+ # initialization upon client reopen. Use the extra db.
+ set stop 0
+ set niter 100
+ while { $stop == 0 } {
+ # Run rep_test in the master (don't update client).
+ puts "\tRep$tnum.g: \
+ Run rep_test until internal init is required."
+ eval rep_test $method1 $masterenv \
+ $db2 $niter $start $start 0 $largs
+ incr start $niter
+ replclear 2
+
+ puts "\tRep$tnum.h: Run db_archive on master."
+ if { $m_logtype != "in-memory"} {
+ set res [eval exec \
+ $util_path/db_archive $enc -d -h $masterdir]
+ set res [eval exec \
+ $util_path/db_archive $enc -l -h $masterdir]
+ }
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > $last_client_log } {
+ set stop 1
+ }
+ }
+
+ puts "\tRep$tnum.i: Reopen client."
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist 0 NONE err
+
+ puts "\tRep$tnum.j: Add a few records to cause initialization."
+ set entries 20
+ eval rep_test $method2 \
+ $masterenv $db1 $entries $start $start 0 $largs
+ incr start $entries
+ process_msgs $envlist 0 NONE err
+
+ puts "\tRep$tnum.k: Verify logs and databases"
+ # Make sure encryption value is correct.
+ if { $encryptenv == 1 } {
+ set encrypt 1
+ }
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+ set testfile2 "test2.db"
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1 $testfile2
+
+ # Check that logs are in-memory or on-disk as expected.
+ check_log_location $masterenv
+ check_log_location $clientenv
+
+ error_check_good db1_close [$db1 close] 0
+ error_check_good db2_close [$db2 close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+ }
+}
+
diff --git a/db-4.8.30/test/rep063.tcl b/db-4.8.30/test/rep063.tcl
new file mode 100644
index 0000000..2128bed
--- /dev/null
+++ b/db-4.8.30/test/rep063.tcl
@@ -0,0 +1,397 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep063
+# TEST Replication election test with simulated different versions
+# TEST for each site. This tests that old sites with real priority
+# TEST trump ELECTABLE sites with zero priority even with greater LSNs.
+# TEST There is a special case in the code for testing that if the
+# TEST priority is <= 10, we simulate mixed versions for elections.
+# TEST
+# TEST Run a rep_test in a replicated master environment and close;
+# TEST hold an election among a group of clients to make sure they select
+# TEST the master with varying LSNs and priorities.
+#
+proc rep063 { method args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+ set tnum "063"
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: Skipping for method $method."
+ return
+ }
+
+ set nclients 5
+ set logsets [create_logsets [expr $nclients + 1]]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ set recopts { "" "-recover" }
+ foreach r $recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r): Replication\
+ elections with varying versions $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "Rep$tnum: Client $i logs are\
+ [lindex $l [expr $i + 1]]"
+ }
+ rep063_sub $method $nclients $tnum $l $r $args
+ }
+ }
+}
+
+proc rep063_sub { method nclients tnum logset recargs largs } {
+ source ./include.tcl
+ global electable_pri
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set niter 80
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ set m_logtype [lindex $logset 0]
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ set c_logtype($i) [lindex $logset [expr $i + 1]]
+ set c_logargs($i) [adjust_logargs $c_logtype($i)]
+ set c_txnargs($i) [adjust_txnargs $c_logtype($i)]
+ }
+
+ # Open a master.
+ set envlist {}
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \
+ -event rep_event $repmemargs \
+ -home $masterdir $m_txnargs $m_logargs -rep_master $verbargs \
+ -errpfx MASTER -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M) $recargs]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+ lappend envlist "$masterenv 1"
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env_noerr -create -home $clientdir($i) \
+ -event rep_event $repmemargs \
+ $c_txnargs($i) $c_logargs($i) -rep_client \
+ -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i) $recargs]
+ error_check_good \
+ client_env($i) [is_valid_env $clientenv($i)] TRUE
+ lappend envlist "$clientenv($i) $envid"
+ }
+ # Bring the clients online by processing the startup messages.
+ process_msgs $envlist
+
+ # Run a modified test001 in the master.
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ #
+ # We remove some client envs and run rep_test so that we can
+ # force some client LSNs to be further ahead/behind than others.
+ # When we're done, the LSNs look like this:
+ #
+ # Client0: ......................
+ # Client1: ...........
+ # Client2: ...........
+ # Client3: ......................
+ # Client4: .................................
+ #
+	# Remove clients 1 and 2 from the list to process; this guarantees
+	# clients 0, 3 and 4 are ahead in LSN.  We use each of these in
+	# different parts of the test, so we want them to have bigger LSNs.
+ #
+ set orig_env $envlist
+ set envlist [lreplace $envlist 3 3]
+ set envlist [lreplace $envlist 2 2]
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+ #
+ # Remove client 3 so that client 4 has the biggest LSN of all.
+ #
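+	# (At this point envlist is {master client0 client3 client4}, so
+	# the entry two back from the end is client 3.)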
+ set eend [llength $envlist]
+ set cl3_i [expr $eend - 2]
+ set envlist [lreplace $envlist $cl3_i $cl3_i]
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+ #
+ # Put all removed clients back in.
+ #
+ set envlist $orig_env
+ error_check_good masterenv_close [$masterenv close] 0
+ set envlist [lreplace $envlist 0 0]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ replclear [expr $i + 2]
+ #
+ # This test doesn't use the testing hooks, so
+ # initialize err_cmd and crash appropriately.
+ #
+ set err_cmd($i) "none"
+ set crash($i) 0
+ #
+ # Initialize the array pri. We'll set it to
+ # appropriate values when the winner is determined.
+ #
+ set pri($i) 0
+ #
+ if { $rep_verbose == 1 } {
+ error_check_good pfx [$clientenv($i) errpfx CLIENT$i] 0
+ $clientenv($i) verbose $verbose_type on
+ set env_cmd($i) [concat $env_cmd($i) \
+ "-errpfx CLIENT$i $verbargs "]
+ }
+ }
+ #
+ # Remove clients 3 and 4 from the envlist. We'll save those for
+ # later.
+ #
+ set cl4 [lindex $envlist 4]
+ set envlist [lreplace $envlist 4 4]
+ set cl3 [lindex $envlist 3]
+ set envlist [lreplace $envlist 3 3]
+
+ set m "Rep$tnum.b"
+ #
+ # Client 0 has the biggest LSN of clients 0, 1, 2.
+ # However, 'setpriority' will set the priority of client 1
+ # to simulate client 1 being an "older version" client.
+ # Client 1 should win even though its LSN is smaller.
+ # This tests one "older" client and the rest "newer".
+ #
+ puts "\t$m: Test old client trumps new clients with bigger LSN."
+ set orig_ncl $nclients
+ set nclients 3
+ set nsites $nclients
+ set nvotes $nclients
+ set winner 1
+ set elector 2
+ setpriority pri $nclients $winner 0 1
+
+ # Set up databases as in-memory or on-disk and run the election.
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+ run_election env_cmd envlist err_cmd pri crash\
+ $qdir $m $elector $nsites $nvotes $nclients $winner 0 $dbname
+
+ #
+ # In all of the checks of the Election Priority stat field,
+ # we use clientenv(2). The reason is that we never expect
+ # client 2 to be the winner. The env handles of client 0 and 1
+ # are getting closed and reopened as a master/client in
+ # the election and the old recorded handles are invalid.
+ # This one is known to be valid throughout the entire test.
+ #
+ error_check_bad old_pri [stat_field $clientenv(2) rep_stat \
+ "Election priority"] 0
+ #
+ # When we finish the election, all clients are at the same LSN.
+ # Call this proc to make the winner have a larger LSN than the
+ # other 2 remaining clients, and reopen the winner as a client.
+ #
+ rep063_movelsn_reopen $method envlist $env_cmd($winner) $winner $largs
+
+ set m "Rep$tnum.c"
+	puts "\t$m: Test old client against a zero-priority new client."
+ #
+ # Client 1 now has a bigger LSN, so make client 0 the old client
+ # and client 1 a real 0 priority new client.
+ #
+ set winner 0
+ setpriority pri $nclients $winner 0 1
+ set pri(1) 0
+ run_election env_cmd envlist err_cmd pri crash $qdir \
+ $m $elector $nsites $nvotes $nclients $winner 0 $dbname
+ error_check_bad old_pri [stat_field $clientenv(2) rep_stat \
+ "Election priority"] 0
+ rep063_movelsn_reopen $method envlist $env_cmd($winner) $winner $largs
+
+ set m "Rep$tnum.d"
+ puts "\t$m: Test multiple old clients with new client."
+ #
+	# Client 0 now has a bigger LSN, so make client 1 the winner.
+ # We are setting client 2's priority to something bigger so that
+ # we simulate having 2 "older version" clients (clients 1 and 2)
+ # and one new client (client 0). This tests that the right client
+ # among the older versions gets correctly elected even though there
+ # is a bigger LSN "new" client participating.
+ #
+ set winner 1
+ setpriority pri $nclients $winner 0 1
+ set pri(2) [expr $pri(1) / 2]
+ run_election env_cmd envlist err_cmd pri crash $qdir \
+ $m $elector $nsites $nvotes $nclients $winner 0 $dbname
+ error_check_bad old_pri [stat_field $clientenv(2) rep_stat \
+ "Election priority"] 0
+ rep063_movelsn_reopen $method envlist $env_cmd($winner) $winner $largs
+
+ set m "Rep$tnum.e"
+ puts "\t$m: Test new clients, client 1 not electable."
+ #
+ # Client 1 now has a bigger LSN, so make it unelectable. Add in
+ # old client 3 since that should be the biggest LSN of all these.
+ # Set all other priorities to electable_pri to make them all equal (and
+ # all "new" clients). We know client 3 should win because we
+ # set its LSN much farther ahead in the beginning.
+ #
+ set winner 3
+ replclear [expr $winner + 2]
+ set nclients 4
+ set nsites $nclients
+ set nvotes $nclients
+ set pri(0) $electable_pri
+ set pri(1) 0
+ set pri(2) $electable_pri
+ set pri(3) $electable_pri
+ replclear [lindex $cl3 1]
+ lappend envlist $cl3
+ #
+ # Winner should be zero priority.
+ #
+ run_election env_cmd envlist err_cmd pri crash $qdir \
+ $m $elector $nsites $nvotes $nclients $winner 0 $dbname
+ error_check_good elect_pri [stat_field $clientenv(2) rep_stat \
+ "Election priority"] 0
+ rep063_movelsn_reopen $method envlist $env_cmd($winner) $winner $largs
+
+ #
+ # Now add in Client 4, the site with the biggest LSN of all.
+ # Test with all being electable clients.
+ #
+ set m "Rep$tnum.f"
+ puts "\t$m: Test all new electable clients."
+ set winner 4
+ set nclients 5
+ set nsites $nclients
+ set nvotes $nclients
+ set pri(0) $electable_pri
+ set pri(1) $electable_pri
+ set pri(2) $electable_pri
+ set pri(3) $electable_pri
+ set pri(4) $electable_pri
+ replclear [expr $winner + 2]
+ lappend envlist $cl4
+ #
+ # Client 4 has biggest LSN and should now win, but winner should
+ # be zero priority.
+ #
+ run_election env_cmd envlist err_cmd pri crash $qdir \
+ $m $elector $nsites $nvotes $nclients $winner 0 $dbname
+ error_check_good elect_pri [stat_field $clientenv(2) rep_stat \
+ "Election priority"] 0
+
+ foreach pair $envlist {
+ set cenv [lindex $pair 0]
+ error_check_good cenv_close [$cenv close] 0
+ }
+ replclose $testdir/MSGQUEUEDIR
+}
+
+#
+# Move the LSN ahead on the newly elected master, while not sending
+# those messages to the other clients. Then close the env and
+# reopen it as a client. Use upvar so that the envlist is
+# modified when we return and can get messages.
+#
+proc rep063_movelsn_reopen { method envlist env_cmd eindex largs } {
+ upvar $envlist elist
+
+ set clrlist { }
+ set i 0
+ foreach e $elist {
+ #
+ # If we find the master env entry, get its env handle.
+ # If not, then get the id so that we can replclear it later.
+ #
+ if { $i == $eindex } {
+ set masterenv [lindex $e 0]
+ } else {
+ lappend clrlist [lindex $e 1]
+ }
+ incr i
+ }
+ #
+ # Move this env's LSN ahead.
+ #
+ set niter 10
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+
+ foreach cl $clrlist {
+ replclear $cl
+ }
+ #
+ # Now close this env and reopen it as a client.
+ #
+ error_check_good newmaster_close [$masterenv close] 0
+ set newclenv [eval $env_cmd]
+ error_check_good cl [is_valid_env $newclenv] TRUE
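+	# Client env ids were assigned as (client index + 2) when the
+	# sites were opened (id 1 is the master), so reuse that id here.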
+ set newenv "$newclenv [expr $eindex + 2]"
+ set elist [lreplace $elist $eindex $eindex $newenv]
+ process_msgs $elist
+}
diff --git a/db-4.8.30/test/rep064.tcl b/db-4.8.30/test/rep064.tcl
new file mode 100644
index 0000000..32b6fdb
--- /dev/null
+++ b/db-4.8.30/test/rep064.tcl
@@ -0,0 +1,168 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2006-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep064
+# TEST Replication rename and forced-upgrade test.
+# TEST
+# TEST The test verifies that the client correctly
+# TEST (internally) closes files when upgrading to master.
+# TEST It does this by having the master have a database
+# TEST open, then crashing. The client upgrades to master,
+# TEST and attempts to remove the open database.
+
+proc rep064 { method { niter 10 } { tnum "064" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Run for btree only. Since we're testing removal of a
+ # file, method doesn't make any difference.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: Skipping for method $method."
+ return
+ }
+
+ set logsets [create_logsets 2]
+ set args [convert_args $method $args]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r): Replication test\
+ closure of open files on upgrade $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep064_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep064_sub { method niter tnum logset recargs largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+
+ set c_logtype [lindex $logset 1]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \
+ -errpfx MASTER -errfile /dev/stderr $verbargs $repmemargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ -errpfx CLIENT -errfile /dev/stderr $verbargs $repmemargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.a: Open an empty db, and leave it open."
+ # Set up names for the db to be left open and empty, and
+ # also for the db that we'll let rep_test open in part .b.
+ if { $databases_in_memory } {
+ set opendb { "" "open.db" }
+ set testfile { "" "test.db" }
+ } else {
+ set opendb "open.db"
+ set testfile "test.db"
+ }
+
+ set masterdb [eval {berkdb_open}\
+ -env $masterenv -create -btree -auto_commit $opendb]
+ error_check_good db [is_valid_db $masterdb] TRUE
+ process_msgs $envlist
+
+ # Run a modified test001 in the master (and update client).
+ puts "\tRep$tnum.b: Open another db, and add some data."
+ eval rep_test $method $masterenv NULL $niter 0 0 $largs
+ process_msgs $envlist
+
+	# This simulates the master crashing: the client is now the only
+	# site left in the group, so there is no need to process messages.
+ #
+ puts "\tRep$tnum.c: Upgrade client."
+ error_check_good client_upg [$clientenv rep_start -master] 0
+
+ puts "\tRep$tnum.d: Remove open databases."
+ set stat [catch {eval $clientenv dbremove -auto_commit $opendb} ret]
+ error_check_good remove_open_file $ret 0
+ error_check_good remove_open_file $stat 0
+
+ set stat [catch {eval $clientenv dbremove -auto_commit $testfile} ret]
+ error_check_good remove_closed_file $ret 0
+ error_check_good remove_closed_file $stat 0
+
+ error_check_good dbclose [$masterdb close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep065.tcl b/db-4.8.30/test/rep065.tcl
new file mode 100644
index 0000000..b0899b7
--- /dev/null
+++ b/db-4.8.30/test/rep065.tcl
@@ -0,0 +1,444 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2006-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep065
+# TEST Tests replication running with different versions.
+# TEST This capability is introduced with 4.5.
+# TEST
+# TEST Start a replication group of 1 master and N sites, all
+# TEST running some historical version greater than or equal to 4.4.
+# TEST Take down a client and bring it up again running current.
+# TEST Run some upgrades, make sure everything works.
+# TEST
+# TEST Each site runs the tcllib of its own version, but uses
+# TEST the current tcl code (e.g. test.tcl).
+proc rep065 { method { nsites 3 } args } {
+ source ./include.tcl
+ global repfiles_in_memory
+ global noenv_messaging
+ set noenv_messaging 1
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+ #
+ # Skip all methods but btree - we don't use the method, as we
+ # run over all of them with varying versions.
+ #
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+
+ if { [is_btree $method] == 0 } {
+ puts "Rep065: Skipping for method $method."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Make the list of {method version} pairs to test.
+ #
+ set mvlist [method_version]
+ set mvlen [llength $mvlist]
+ puts "Rep065: Testing the following $mvlen method/version pairs:"
+ puts "Rep065: $mvlist"
+ puts "Rep065: $msg2"
+ set count 1
+ set total [llength $mvlist]
+ set slist [setup_sites $nsites]
+ foreach i $mvlist {
+ puts "Rep065: Test iteration $count of $total: $i"
+ rep065_sub $count $i $nsites $slist
+ incr count
+ }
+ set noenv_messaging 0
+}
+
+proc rep065_sub { iter mv nsites slist } {
+ source ./include.tcl
+ global machids
+ global util_path
+ set machids {}
+ set method [lindex $mv 0]
+ set vers [lindex $mv 1]
+
+ puts "\tRep065.$iter.a: Set up."
+ # Whatever directory we started this process from is referred
+ # to as the controlling directory. It will contain the message
+ # queue and start all the child processes.
+ set controldir [pwd]
+ env_cleanup $controldir/$testdir
+ replsetup_noenv $controldir/$testdir/MSGQUEUEDIR
+
+ # Set up the historical build directory. The master will start
+ # running with historical code.
+ #
+ # This test presumes we are running in the current build
+ # directory and that the expected historical builds are
+ # set up in a similar fashion. If they are not, quit gracefully.
+
+ set pwd [pwd]
+ set homedir [file dirname [file dirname $pwd]]
+ set reputils_path $pwd/../test
+ set histdir $homedir/$vers/build_unix
+ if { [file exists $histdir] == 0 } {
+ puts -nonewline "Skipping iteration $iter: cannot find"
+ puts " historical version $vers."
+ return
+ }
+ if { [file exists $histdir/db_verify] == 0 } {
+ puts -nonewline "Skipping iteration $iter: historical version"
+ puts " $vers is missing some executables. Is it built?"
+ return
+ }
+
+ set histtestdir $histdir/TESTDIR
+
+ env_cleanup $histtestdir
+ set markerdir $controldir/$testdir/MARKER
+ file delete -force $markerdir
+
+ # Create site directories. They start running in the historical
+ # directory, too. They will be upgraded to the current version
+ # first.
+ set allids { }
+ for { set i 0 } { $i < $nsites } { incr i } {
+ set siteid($i) [expr $i + 1]
+ set sid $siteid($i)
+ lappend allids $sid
+ set histdirs($sid) $histtestdir/SITE.$i
+ set upgdir($sid) $controldir/$testdir/SITE.$i
+ file mkdir $histdirs($sid)
+ file mkdir $upgdir($sid)
+ }
+
+	# Open the master env running the historical version.
+ #
+ # We know that slist has all sites starting in the histdir.
+ # So if we encounter an upgrade value, we upgrade that client
+ # from the hist dir.
+ #
+ set count 1
+ foreach sitevers $slist {
+ puts "\tRep065.b.$iter.$count: Run with sitelist $sitevers."
+ #
+ # Delete the marker directory each iteration so that
+ # we don't find old data in there.
+ #
+ file delete -force $markerdir
+ file mkdir $markerdir
+ #
+ # Get the chosen master index from the list of sites.
+ #
+ set mindex [get_master $nsites $sitevers]
+ set meid [expr $mindex + 1]
+
+ #
+ # Kick off the test processes. We need 1 test process
+ # per site and 1 message process per site.
+ #
+ set pids {}
+ for { set i 0 } { $i < $nsites } { incr i } {
+ set upg [lindex $sitevers $i]
+ set sid $siteid($i)
+ #
+ # If we are running "old" set up an array
+ # saying if this site has run old/new yet.
+ # The reason is that we want to "upgrade"
+ # only the first time we go from old to new,
+ # not every iteration through this loop.
+ #
+ if { $upg == 0 } {
+ puts -nonewline "\t\tRep065.b: Test: Old site $i"
+ set sitedir($i) $histdirs($sid)
+ set already_upgraded($i) 0
+ } else {
+ puts -nonewline "\t\tRep065.b: Test: Upgraded site $i"
+ set sitedir($i) $upgdir($sid)
+ if { $already_upgraded($i) == 0 } {
+ upg_repdir $histdirs($sid) $sitedir($i)
+ }
+ set already_upgraded($i) 1
+ }
+ if { $sid == $meid } {
+ set state MASTER
+ set runtest [list REPTEST $method 15 10]
+ puts " (MASTER)"
+ } else {
+ set state CLIENT
+ set runtest {REPTEST_GET}
+ puts " (CLIENT)"
+ }
+ lappend pids [exec $tclsh_path $test_path/wrap.tcl \
+ rep065script.tcl \
+ $controldir/$testdir/$count.S$i.log \
+ SKIP \
+ START $state \
+ $runtest \
+ $sid $allids $controldir \
+ $sitedir($i) $reputils_path &]
+ lappend pids [exec $tclsh_path $test_path/wrap.tcl \
+ rep065script.tcl \
+ $controldir/$testdir/$count.S$i.msg \
+ SKIP \
+ PROCMSGS $state \
+ NULL \
+ $sid $allids $controldir \
+ $sitedir($i) $reputils_path &]
+ }
+
+ watch_procs $pids 20
+ #
+ # At this point, clean up any message files. The message
+ # system leads to a significant number of duplicate
+ # requests. If the master site handled them after the
+ # client message processes exited, then there can be
+ # a large number of "dead" message files waiting for
+ # non-existent clients. Just clean up everyone.
+ #
+ for { set i 0 } { $i < $nsites } { incr i } {
+ replclear_noenv $siteid($i)
+ }
+
+ #
+ # Kick off the verification processes. These just walk
+ # their own logs and databases, so we don't need to have
+ # a message process. We need separate processes because
+ # old sites need to use old utilities.
+ #
+ set pids {}
+ puts "\tRep065.c.$iter.$count: Verify all sites."
+ for { set i 0 } { $i < $nsites } { incr i } {
+ if { $siteid($i) == $meid } {
+ set state MASTER
+ } else {
+ set state CLIENT
+ }
+ lappend pids [exec $tclsh_path $test_path/wrap.tcl \
+ rep065script.tcl \
+ $controldir/$testdir/$count.S$i.ver \
+ SKIP \
+ VERIFY $state \
+ {LOG DB} \
+ $siteid($i) $allids $controldir \
+ $sitedir($i) $reputils_path &]
+ }
+
+ watch_procs $pids 10
+ #
+ # Now that each site created its verification files,
+ # we can now verify everyone.
+ #
+ for { set i 0 } { $i < $nsites } { incr i } {
+ if { $i == $mindex } {
+ continue
+ }
+ puts \
+ "\t\tRep065.c: Verify: Compare databases master and client $i"
+ error_check_good db_cmp \
+ [filecmp $sitedir($mindex)/VERIFY/dbdump \
+ $sitedir($i)/VERIFY/dbdump] 0
+ set upg [lindex $sitevers $i]
+ # !!!
+ # Although db_printlog works and can read old logs,
+ # there have been some changes to the output text that
+ # makes comparing difficult. One possible solution
+ # is to run db_printlog here, from the current directory
+ # instead of from the historical directory.
+ #
+ if { $upg == 0 } {
+ puts \
+ "\t\tRep065.c: Verify: Compare logs master and client $i"
+ error_check_good log_cmp \
+ [filecmp $sitedir($mindex)/VERIFY/prlog \
+ $sitedir($i)/VERIFY/prlog] 0
+ } else {
+ puts \
+ "\t\tRep065.c: Verify: Compare LSNs master and client $i"
+ error_check_good log_cmp \
+ [filecmp $sitedir($mindex)/VERIFY/loglsn \
+ $sitedir($i)/VERIFY/loglsn] 0
+ }
+ }
+
+ #
+ # At this point we have a master and sites all up to date
+ # with each other. Now, one at a time, upgrade the sites
+ # to the current version and start everyone up again.
+ incr count
+ }
+}
+
+proc setup_sites { nsites } {
+ #
+ # Set up a list that goes from 0 to $nsites running
+ # upgraded. A 0 represents running old version and 1
+ # represents running upgraded. So, for 3 sites it will look like:
+ # { 0 0 0 } { 1 0 0 } { 1 1 0 } { 1 1 1 }
+ #
+ set sitelist {}
+ for { set i 0 } { $i <= $nsites } { incr i } {
+ set l ""
+ for { set j 1 } { $j <= $nsites } { incr j } {
+ if { $i < $j } {
+ lappend l 0
+ } else {
+ lappend l 1
+ }
+ }
+ lappend sitelist $l
+ }
+ return $sitelist
+}
+
+proc upg_repdir { histdir upgdir } {
+ global util_path
+
+ #
+ # Upgrade a site to the current version. This entails:
+	# 1. Remove any old files from the upgrade directory.
+	# 2. Copy all old-version files to the upgrade directory.
+	# 3. Remove any __db files from the upgrade directory except
+	#    __db.rep*gen.
+	# 4. Force a checkpoint with the new version.
+ file delete -force $upgdir
+
+ # Recovery was run before as part of upgradescript.
+ # Archive dir by copying it to upgrade dir.
+ file copy -force $histdir $upgdir
+ set dbfiles [glob -nocomplain $upgdir/__db*]
+ foreach d $dbfiles {
+ if { $d == "$upgdir/__db.rep.gen" ||
+ $d == "$upgdir/__db.rep.egen" } {
+ continue
+ }
+ file delete -force $d
+ }
+ # Force current version checkpoint
+ set stat [catch {eval exec $util_path/db_checkpoint -1 -h $upgdir} r]
+ if { $stat != 0 } {
+ puts "CHECKPOINT: $upgdir: $r"
+ }
+ error_check_good stat_ckp $stat 0
+}
+
+proc get_master { nsites verslist } {
+ error_check_good vlist_chk [llength $verslist] $nsites
+ #
+ # When we can, simply run an election to get a new master.
+ # We then verify we got an old client.
+ #
+ # For now, randomly pick among the old sites, or if no old
+ # sites just randomly pick anyone.
+ #
+ set old_count 0
+ # Pick 1 out of N old sites or 1 out of nsites if all upgraded.
+ foreach i $verslist {
+ if { $i == 0 } {
+ incr old_count
+ }
+ }
+ if { $old_count == 0 } {
+ set old_count $nsites
+ }
+ set master [berkdb random_int 0 [expr $old_count - 1]]
+ #
+ # Since the Nth old site may not be at the Nth place in the
+ # list unless we used the entire list, we need to loop to find
+ # the right index to return.
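+	# For example (hypothetical values): with verslist {1 0 1 0} there
+	# are two old sites; if the random pick above is 1, the loop below
+	# returns index 3, the position of the second old site.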
+ if { $old_count == $nsites } {
+ return $master
+ }
+ set ocount 0
+ set index 0
+ foreach i $verslist {
+ if { $i == 1 } {
+ incr index
+ continue
+ }
+ if { $ocount == $master } {
+ return $index
+ }
+ incr ocount
+ incr index
+ }
+ #
+ # If we get here there is a problem in the code.
+ #
+ error "FAIL: get_master problem"
+}
+
+proc method_version { } {
+ global valid_methods
+
+ set meth $valid_methods
+ set startmv { {btree db-4.4.20} {hash db-4.5.20} }
+
+ # Remove btree and hash from the method list, we're manually
+ # assigning those versions due to log/recovery record changes
+ # at that version.
+ set midx [lsearch -exact $meth hash]
+ set meth [lreplace $meth $midx $midx]
+ set midx [lsearch -exact $meth btree]
+ set meth [lreplace $meth $midx $midx]
+
+ set vers {db-4.4.20 db-4.5.20 db-4.6.21 db-4.7.25}
+ set dbvlen [llength $vers]
+ #
+ # NOTE: The values in "vers_list" are indices into $vers above.
+ # Since we're explicitly testing 4.4.20 and 4.5.20 above,
+ # weight later versions more.
+ # When you add a new version to $vers, you must
+ # add some new items to $vers_list to choose that index.
+ # Also need to add an entry for 'vtest' below.
+ #
+ set vers_list { 0 0 1 1 2 2 2 3 3 3 }
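+	# With this distribution 4.4.20 and 4.5.20 each get 2 of the 10
+	# slots and 4.6.21 and 4.7.25 each get 3.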
+ set vers_len [expr [llength $vers_list] - 1]
+
+ # Walk through the list of remaining methods and randomly
+ # assign a version to each one.
+ while { 1 } {
+ set mv $startmv
+ # We want to make sure we test each version.
+ # 4.4.20
+ set vtest(0) 1
+ # 4.5.20
+ set vtest(1) 1
+ # 4.6.21
+ set vtest(2) 0
+ # 4.7.25
+ set vtest(3) 0
+ foreach m $meth {
+ # Index into distribution list.
+ set vidx [berkdb random_int 0 $vers_len]
+ # Index into version list.
+ set vindex [lindex $vers_list $vidx]
+ set vtest($vindex) 1
+ set v [lindex $vers $vindex]
+ lappend mv [list $m $v]
+ }
+ #
+ # Assume success. If we find any $vtest entry of 0,
+ # then we fail and try again.
+ #
+ set all_vers 1
+ for { set i 0 } { $i < $dbvlen } { incr i } {
+ if { $vtest($i) == 0 } {
+ set all_vers 0
+ }
+ }
+ if { $all_vers == 1 } {
+ break
+ }
+# puts "Did not get all versions with $mv."
+ }
+
+ return $mv
+}
diff --git a/db-4.8.30/test/rep065script.tcl b/db-4.8.30/test/rep065script.tcl
new file mode 100644
index 0000000..f3049b4
--- /dev/null
+++ b/db-4.8.30/test/rep065script.tcl
@@ -0,0 +1,416 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2006-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# rep065script - procs to use at each replication site in the
+# replication upgrade test.
+#
+# type: START, PROCMSGS, VERIFY
+# START starts up a replication site and performs an operation.
+# The operations are:
+# REPTEST runs the rep_test_upg procedure on the master.
+# REPTEST_GET run a read-only test on a client.
+# REPTEST_ELECT runs an election on the site.
+# PROCMSGS processes messages until none are left.
+# VERIFY dumps the log and database contents.
+# role: master or client
+# op: operation to perform
+# envid: environment id number for use in replsend
+# allids: all env ids we need for sending
+# ctldir: controlling directory
+# mydir: directory where this participant runs
+# reputils_path: location of reputils.tcl
+
+proc rep065scr_elect { repenv oplist } {
+ set ver [lindex $oplist 1]
+ set pri [lindex $oplist 2]
+}
+
+proc rep065scr_reptest { repenv oplist markerdb } {
+
+ set method [lindex $oplist 1]
+ set niter [lindex $oplist 2]
+ set loop [lindex $oplist 3]
+ set start 0
+ puts "REPTEST: method $method, niter $niter, loop $loop"
+
+ for {set n 0} {$n < $loop} {incr n} {
+ puts "REPTEST: call rep_test_upg $n"
+ eval rep_test_upg $method $repenv NULL $niter $start $start 0 0
+ incr start $niter
+ tclsleep 3
+ }
+ #
+ # Sleep a bunch to help get the messages worked through.
+ #
+ tclsleep 10
+ puts "put DONE to marker"
+ error_check_good marker_done [$markerdb put DONE DONE] 0
+ error_check_good marker_sync [$markerdb sync] 0
+}
+
+proc rep065scr_repget { repenv oplist mydir markerfile } {
+ set dbname "$mydir/test.db"
+ set i 0
+ while { [file exists $dbname] == 0 } {
+ tclsleep 2
+ incr i
+ if { $i >= 15 && $i % 5 == 0 } {
+			puts "After [expr $i * 2] seconds, no database exists."
+ }
+ if { $i > 180 } {
+ error "Database never created."
+ }
+ }
+ set loop 1
+ while { 1 } {
+ set markerdb [berkdb_open $markerfile]
+ error_check_good marker [is_valid_db $markerdb] TRUE
+ set kd [$markerdb get DONE]
+ error_check_good marker_close [$markerdb close] 0
+ if { [llength $kd] != 0 } {
+ break
+ }
+ set db [berkdb_open -env $repenv $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ set i 0
+ error_check_good curs [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first ] } \
+ { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -next] } {
+ incr i
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ puts "REPTEST_GET: after $loop loops: key count $i"
+ incr loop
+ tclsleep 2
+ }
+}
+proc rep065scr_starttest { role oplist envid msgdir mydir allids markerfile } {
+ global qtestdir
+ global util_path
+ global repfiles_in_memory
+
+ puts "repladd_noenv $allids"
+ set qtestdir $msgdir
+ foreach id $allids {
+ repladd_noenv $id
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set markerdb [berkdb_open -create -btree $markerfile]
+ error_check_good marker [is_valid_db $markerdb] TRUE
+ puts "set up env cmd"
+ set lockmax 40000
+ set logbuf [expr 16 * 1024]
+ set logmax [expr $logbuf * 4]
+ if { $role == "MASTER" } {
+ set rep_env_cmd "berkdb_env_noerr -create -home $mydir \
+ -log_max $logmax -log_buffer $logbuf $repmemargs \
+ -lock_max_objects $lockmax -lock_max_locks $lockmax \
+ -errpfx MASTER -txn -rep_master \
+ -rep_transport \[list $envid replsend_noenv\]"
+ set rep_env_cmd "berkdb_env_noerr -create -home $mydir \
+ -log_max $logmax -log_buffer $logbuf $repmemargs \
+ -lock_max_objects $lockmax -lock_max_locks $lockmax \
+ -errpfx MASTER -txn -rep_master \
+ -verbose {rep on} -errfile /dev/stderr \
+ -rep_transport \[list $envid replsend_noenv\]"
+ } elseif { $role == "CLIENT" } {
+ set rep_env_cmd "berkdb_env_noerr -create -home $mydir \
+ -log_max $logmax -log_buffer $logbuf $repmemargs \
+ -lock_max_objects $lockmax -lock_max_locks $lockmax \
+ -errpfx CLIENT -txn -rep_client \
+ -rep_transport \[list $envid replsend_noenv\]"
+ set rep_env_cmd "berkdb_env_noerr -create -home $mydir \
+ -log_max $logmax -log_buffer $logbuf $repmemargs \
+ -lock_max_objects $lockmax -lock_max_locks $lockmax \
+ -errpfx CLIENT -txn -rep_client \
+ -verbose {rep on} -errfile /dev/stderr \
+ -rep_transport \[list $envid replsend_noenv\]"
+ } else {
+ puts "FAIL: unrecognized replication role $role"
+ return
+ }
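+	# Note: in each branch above the second "set rep_env_cmd" overrides
+	# the first, so the env is opened with rep verbose output sent to
+	# stderr; presumably the quieter command is kept so it can be
+	# swapped in when the debugging output is not wanted.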
+
+ # Change directories to where this will run.
+ # !!!
+ # mydir is an absolute path of the form
+ # <path>/build_unix/TESTDIR/MASTERDIR or
+ # <path>/build_unix/TESTDIR/CLIENTDIR.0
+ #
+ # So we want to run relative to the build_unix directory
+ cd $mydir/../..
+
+ puts "open repenv $rep_env_cmd"
+ set repenv [eval $rep_env_cmd]
+ error_check_good repenv_open [is_valid_env $repenv] TRUE
+
+ puts "repenv is $repenv"
+ #
+ # Indicate that we're done starting up. Sleep to let
+ # others do the same.
+ #
+ puts "put START$envid to marker"
+ error_check_good marker_done [$markerdb put START$envid START$envid] 0
+ error_check_good marker_sync [$markerdb sync] 0
+ puts "sleeping after marker"
+ tclsleep 3
+
+ # Here is where the real test starts.
+ #
+ # Different operations may have different args in their list.
+ # REPTEST: Args are method, niter, nloops
+ set op [lindex $oplist 0]
+ if { $op == "REPTEST" } {
+ #
+ # This test writes the marker, so close after it runs.
+ #
+ rep065scr_reptest $repenv $oplist $markerdb
+ error_check_good marker_close [$markerdb close] 0
+ }
+ if { $op == "REPTEST_GET" } {
+ #
+ # This test needs to poll the marker. So close it now.
+ #
+ error_check_good marker_close [$markerdb close] 0
+ rep065scr_repget $repenv $oplist $mydir $markerfile
+ }
+ if { $op == "REP_ELECT" } {
+ #
+ # This test writes the marker, so close after it runs.
+ #
+ rep065scr_elect $repenv $oplist $markerdb
+ }
+ puts "Closing env"
+ $repenv mpool_sync
+ error_check_good envclose [$repenv close] 0
+
+}
+
+proc rep065scr_msgs { role envid msgdir mydir allids markerfile } {
+ global qtestdir
+ global repfiles_in_memory
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ #
+ # The main test process will write the marker file when it
+ # has started and when it has completed. We need to
+ # open/close the marker file because we are in a separate
+ # process from the writer and we cannot share an env because
+ # we might be a different BDB release version.
+ #
+ set markerdb [berkdb_open -create -btree $markerfile]
+ error_check_good marker [is_valid_db $markerdb] TRUE
+ set s [$markerdb get START$envid]
+ while { [llength $s] == 0 } {
+ error_check_good marker_close [$markerdb close] 0
+ tclsleep 1
+ set markerdb [berkdb_open $markerfile]
+ error_check_good marker [is_valid_db $markerdb] TRUE
+ set s [$markerdb get START$envid]
+ }
+
+ puts "repladd_noenv $allids"
+ set qtestdir $msgdir
+ foreach id $allids {
+ repladd_noenv $id
+ }
+
+ puts "set up env cmd"
+ if { $role == "MASTER" } {
+ set rep_env_cmd "berkdb_env_noerr -home $mydir \
+ -errpfx MASTER -txn -rep_master $repmemargs \
+ -rep_transport \[list $envid replsend_noenv\]"
+ set rep_env_cmd "berkdb_env_noerr -home $mydir \
+ -errpfx MASTER -txn -rep_master $repmemargs \
+ -verbose {rep on} -errfile /dev/stderr \
+ -rep_transport \[list $envid replsend_noenv\]"
+ } elseif { $role == "CLIENT" } {
+ set rep_env_cmd "berkdb_env_noerr -home $mydir \
+ -errpfx CLIENT -txn -rep_client $repmemargs \
+ -rep_transport \[list $envid replsend_noenv\]"
+ set rep_env_cmd "berkdb_env_noerr -home $mydir \
+ -errpfx CLIENT -txn -rep_client $repmemargs \
+ -verbose {rep on} -errfile /dev/stderr \
+ -rep_transport \[list $envid replsend_noenv\]"
+ } else {
+ puts "FAIL: unrecognized replication role $role"
+ return
+ }
+
+ # Change directories to where this will run.
+ cd $mydir
+
+ puts "open repenv $rep_env_cmd"
+ set repenv [eval $rep_env_cmd]
+ error_check_good repenv_open [is_valid_env $repenv] TRUE
+
+ set envlist "{$repenv $envid}"
+ puts "repenv is $repenv"
+ while { 1 } {
+ if { [llength [$markerdb get DONE]] != 0 } {
+ break
+ }
+ process_msgs $envlist 0 NONE NONE 1
+ error_check_good marker_close [$markerdb close] 0
+ set markerdb [berkdb_open $markerfile]
+ error_check_good marker [is_valid_db $markerdb] TRUE
+ tclsleep 1
+ }
+ #
+ # Process messages in case there are a few more stragglers.
+ # Just because the main test is done doesn't mean that all
+ # the messaging is done. Loop for messages as long as
+ # progress is being made.
+ #
+ set nummsg 1
+ while { $nummsg != 0 } {
+ process_msgs $envlist 0 NONE NONE 1
+ tclsleep 1
+ # First look at messages from us
+ set nummsg [replmsglen_noenv $envid from]
+ puts "Still have $nummsg not yet processed by others"
+ }
+ error_check_good marker_close [$markerdb close] 0
+ replclear_noenv $envid from
+ tclsleep 1
+ replclear_noenv $envid
+ $repenv mpool_sync
+ error_check_good envclose [$repenv close] 0
+}
+
+proc rep065scr_verify { oplist mydir id } {
+ global util_path
+
+ set rep_env_cmd "berkdb_env_noerr -home $mydir -txn \
+ -rep_transport \[list $id replnoop\]"
+
+ # Change directories to where this will run.
+ # !!!
+ # mydir is an absolute path of the form
+ # <path>/build_unix/TESTDIR/MASTERDIR or
+ # <path>/build_unix/TESTDIR/CLIENTDIR.0
+ #
+ # So we want to run relative to the build_unix directory
+ cd $mydir/../..
+
+ foreach op $oplist {
+ set repenv [eval $rep_env_cmd]
+ error_check_good env_open [is_valid_env $repenv] TRUE
+ if { $op == "DB" } {
+ set dbname "$mydir/test.db"
+ set db [berkdb_open -env $repenv -rdonly $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set txn ""
+ set method [$db get_type]
+ if { [is_record_based $method] == 1 } {
+ dump_file $db $txn $mydir/VERIFY/dbdump \
+ rep_test_upg.recno.check
+ } else {
+ dump_file $db $txn $mydir/VERIFY/dbdump \
+ rep_test_upg.check
+ }
+ error_check_good dbclose [$db close] 0
+ }
+ if { $op == "LOG" } {
+ set lgstat [$repenv log_stat]
+ set lgfile [stat_field $repenv log_stat "Current log file number"]
+ set lgoff [stat_field $repenv log_stat "Current log file offset"]
+ puts "Current LSN: $lgfile $lgoff"
+ set f [open $mydir/VERIFY/loglsn w]
+ puts $f $lgfile
+ puts $f $lgoff
+ close $f
+
+ set stat [catch {eval exec $util_path/db_printlog \
+ -h $mydir > $mydir/VERIFY/prlog} result]
+ if { $stat != 0 } {
+ puts "PRINTLOG: $result"
+ }
+ error_check_good stat_prlog $stat 0
+ }
+ error_check_good envclose [$repenv close] 0
+ }
+ #
+	# Run recovery locally so that the environment is ready for
+	# any later upgrades.
+ #
+ set stat [catch {eval exec $util_path/db_recover -h $mydir} result]
+ if { $stat != 0 } {
+ puts "RECOVERY: $result"
+ }
+ error_check_good stat_rec $stat 0
+
+}
+
+set usage "upgradescript type role op envid allids ctldir mydir reputils_path"
+
+# Verify usage
+if { $argc != 8 } {
+ puts stderr "Argc $argc, argv $argv"
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set type [ lindex $argv 0 ]
+set role [ lindex $argv 1 ]
+set op [ lindex $argv 2 ]
+set envid [ lindex $argv 3 ]
+set allids [ lindex $argv 4 ]
+set ctldir [ lindex $argv 5 ]
+set mydir [ lindex $argv 6 ]
+set reputils_path [ lindex $argv 7 ]
+
+set histdir $mydir/../..
+puts "Histdir $histdir"
+
+set msgtestdir $ctldir/TESTDIR
+
+global env
+cd $histdir
+set stat [catch {eval exec ./db_printlog -V} result]
+if { $stat != 0 } {
+ set env(LD_LIBRARY_PATH) ":$histdir:$histdir/.libs:$env(LD_LIBRARY_PATH)"
+}
+source ./include.tcl
+source $test_path/test.tcl
+
+# The global variable noenv_messaging must be set after sourcing
+# test.tcl or its value will be wrong.
+global noenv_messaging
+set noenv_messaging 1
+
+set is_repchild 1
+puts "Did args. now source reputils"
+source $reputils_path/reputils.tcl
+source $reputils_path/reputilsnoenv.tcl
+
+set markerdir $msgtestdir/MARKER
+set markerfile $markerdir/marker.db
+
+puts "Calling proc for type $type"
+if { $type == "START" } {
+ rep065scr_starttest $role $op $envid $msgtestdir $mydir $allids $markerfile
+} elseif { $type == "PROCMSGS" } {
+ rep065scr_msgs $role $envid $msgtestdir $mydir $allids $markerfile
+} elseif { $type == "VERIFY" } {
+ file mkdir $mydir/VERIFY
+ rep065scr_verify $op $mydir $envid
+} else {
+ puts "FAIL: unknown type $type"
+ return
+}
diff --git a/db-4.8.30/test/rep066.tcl b/db-4.8.30/test/rep066.tcl
new file mode 100644
index 0000000..f0f5ff3
--- /dev/null
+++ b/db-4.8.30/test/rep066.tcl
@@ -0,0 +1,269 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep066
+# TEST Replication and dead log handles.
+# TEST
+# TEST Run rep_test on master and a client.
+# TEST Simulate client crashes (master continues) until log 2.
+# TEST Open 2nd master env handle and put something in log and flush.
+# TEST Downgrade master, restart client as master.
+# TEST Run rep_test on newmaster until log 2.
+# TEST New master writes log records, newclient processes records
+# TEST and 2nd newclient env handle calls log_flush.
+# TEST New master commits, newclient processes and should succeed.
+# TEST Make sure 2nd handle detects the old log handle and doesn't
+# TEST write to a stale handle (if it does, the processing of the
+# TEST commit will fail).
+#
+proc rep066 { method { niter 10 } { tnum "066" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ # This test requires a second handle on an env, and HP-UX
+ # doesn't support that.
+ if { $is_hp_test } {
+ puts "Skipping rep$tnum for HP-UX."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Rep$tnum: Skipping\
+ for in-memory logs with -recover."
+ continue
+ }
+ puts "Rep$tnum ($method $r):\
+ Replication and dead log handles $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep066_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep066_sub { method niter tnum logset recargs largs } {
+ global testdir
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
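+	# (With the 4096-byte pagesize above, log_max works out to 32768
+	# bytes, so the test rolls over to new log files quickly.)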
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ # Later we'll open a 2nd handle to this env.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ $repmemargs \
+ $m_logargs -errpfx ENV0 -log_max $log_max $verbargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set env0 [eval $ma_envcmd $recargs -rep_master]
+ set masterenv $env0
+
+ # Open a client.
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $repmemargs \
+ $c_logargs -errpfx ENV1 -log_max $log_max $verbargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set env1 [eval $cl_envcmd $recargs -rep_client]
+ set clientenv $env1
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$env0 1} {$env1 2}"
+ process_msgs $envlist
+
+ # Run a modified test001 in the master (and update clients).
+ puts "\tRep$tnum.a.0: Running rep_test in replicated env."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ set nstart $niter
+ set last_client_log [get_logfile $env1 last]
+ set stop 0
+ while { $stop == 0 } {
+ puts "\tRep$tnum.b: Run test on master until log file changes."
+ eval rep_test\
+ $method $masterenv NULL $niter $nstart $nstart 0 $largs
+ incr nstart $niter
+ replclear 2
+ set last_master_log [get_logfile $masterenv last]
+ if { $last_master_log > $last_client_log } {
+ set stop 1
+ }
+ }
+
+ # Open a 2nd env handle on the master.
+ # We want to have some operations happen on the normal
+ # handle and then flush them with this handle.
+ puts "\tRep$tnum.c: Open 2nd master env and flush log."
+ set 2ndenv [eval $ma_envcmd -rep_master -errpfx 2NDENV]
+ error_check_good master_env [is_valid_env $2ndenv] TRUE
+
+
+ # Set up databases as in-memory or on-disk.
+ if { $databases_in_memory } {
+ set testfile { "" "test.db" }
+ } else {
+ set testfile "test.db"
+ }
+
+ set omethod [convert_method $method]
+ set txn [$masterenv txn]
+ error_check_good txn [is_valid_txn $txn $masterenv] TRUE
+ set db [eval {berkdb_open_noerr -env $masterenv -errpfx MASTER \
+ -txn $txn -create -mode 0644} $largs $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Flush on the 2nd handle
+ set lf [stat_field $2ndenv log_stat "Times log flushed to disk"]
+ error_check_good flush [$2ndenv log_flush] 0
+ set lf2 [stat_field $2ndenv log_stat "Times log flushed to disk"]
+ error_check_bad log_flush $lf $lf2
+
+	# The detection of a dead log handle is based on a 1-second-resolution
+	# timestamp comparison. Now that we've established the source of the
+	# potentially dead handle in $2ndenv, wait a moment to make sure that
+	# the fresh handle that we're about to create gets a later timestamp.
+ tclsleep 1
+
+ # Resolve the txn and close the database
+ error_check_good commit [$txn commit] 0
+ error_check_good close [$db close] 0
+
+ # Nuke those messages for client about to become master.
+ replclear 2
+
+ puts "\tRep$tnum.d: Swap envs"
+ set masterenv $env1
+ set clientenv $env0
+ error_check_good downgrade [$clientenv rep_start -client] 0
+ error_check_good upgrade [$masterenv rep_start -master] 0
+ set envlist "{$env0 1} {$env1 2}"
+ process_msgs $envlist
+
+ #
+ # At this point, env0 should have rolled back across the log file.
+ # We need to do some operations on the master, process them on
+ # the client (but not a commit because that flushes). We want
+ # the message processing client env (env0) to put records in
+ # the log buffer and the 2nd env handle to flush the log.
+ #
+	puts "\tRep$tnum.e: Run test until a new log file is created."
+ #
+ # Set this to the last log file the old master had.
+ #
+ set last_client_log $last_master_log
+ set last_master_log [get_logfile $masterenv last]
+ set stop 0
+ while { $stop == 0 } {
+ puts "\tRep$tnum.e: Run test on master until log file changes."
+ eval rep_test\
+ $method $masterenv NULL $niter $nstart $nstart 0 $largs
+ process_msgs $envlist
+ incr nstart $niter
+ set last_master_log [get_logfile $masterenv last]
+ if { $last_master_log == $last_client_log } {
+ set stop 1
+ }
+ }
+ puts "\tRep$tnum.f: Create some log records."
+ set txn [$masterenv txn]
+ error_check_good txn [is_valid_txn $txn $masterenv] TRUE
+ set db [eval {berkdb_open_noerr -env $masterenv -errpfx MASTER \
+ -txn $txn -create -mode 0644} $largs $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ process_msgs $envlist
+ # Flush on the 2nd handle
+ puts "\tRep$tnum.g: Flush on 2nd env handle."
+ set lf [stat_field $2ndenv log_stat "Times log flushed to disk"]
+ error_check_good flush [$2ndenv log_flush] 0
+ set lf2 [stat_field $2ndenv log_stat "Times log flushed to disk"]
+ error_check_bad log_flush2 $lf $lf2
+
+ # Resolve the txn and close the database
+ puts "\tRep$tnum.h: Process commit on client env handle."
+ error_check_good commit [$txn commit] 0
+ error_check_good close [$db close] 0
+ process_msgs $envlist
+
+ error_check_good cl2_close [$2ndenv close] 0
+ error_check_good env0_close [$env0 close] 0
+ error_check_good env1_close [$env1 close] 0
+ replclose $testdir/MSGQUEUEDIR
+ return
+}
+
diff --git a/db-4.8.30/test/rep067.tcl b/db-4.8.30/test/rep067.tcl
new file mode 100644
index 0000000..9ba0d24
--- /dev/null
+++ b/db-4.8.30/test/rep067.tcl
@@ -0,0 +1,395 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep067
+# TEST Replication election test with large timeouts.
+# TEST
+# TEST Test replication elections among clients with widely varying
+# TEST timeouts. This test is used to simulate a customer that
+# TEST wants to force full participation in an election, but only
+# TEST if all sites are present (i.e. if all sites are restarted
+# TEST together). If any site has already been part of the group,
+# TEST then we want to be able to elect a master based on majority.
+# TEST Using varied timeouts, we can force full participation if
+# TEST all sites are present with "long_timeout" amount of time and
+# TEST then revert to majority.
+# TEST
+# TEST A long_timeout would be several minutes whereas a normal
+# TEST short timeout would be a few seconds.
+#
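+# As a concrete illustration, the rep067_elect proc below uses
+# long = 180000000 usec (3 minutes), medium = long / 2 (90 seconds),
+# and short = 10000000 usec (10 seconds) as the election timeouts.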
+proc rep067 { method args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep067: Skipping for method $method."
+ return
+ }
+
+ set tnum "067"
+ set niter 10
+ set nclients 3
+ set logsets [create_logsets [expr $nclients + 1]]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # We don't want to run this with -recover - it takes too
+ # long and doesn't cover any new ground.
+ set recargs ""
+ foreach l $logsets {
+		puts "Rep$tnum ($recargs): Replication election with mixed\
+		    long timeouts, $nclients clients, $msg $msg2."
+ puts -nonewline "Rep$tnum: Started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "Rep$tnum: Client $i logs are\
+ [lindex $l [expr $i + 1]]"
+ }
+ rep067_sub $method $tnum \
+ $niter $nclients $l $recargs $args
+ }
+}
+
+proc rep067_sub { method tnum niter nclients logset recargs largs } {
+ source ./include.tcl
+ global rand_init
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ set m_logtype [lindex $logset 0]
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ set c_logtype($i) [lindex $logset [expr $i + 1]]
+ set c_logargs($i) [adjust_logargs $c_logtype($i)]
+ set c_txnargs($i) [adjust_txnargs $c_logtype($i)]
+ }
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create -log_max 1000000 \
+ -event rep_event $repmemargs \
+ -home $masterdir $m_logargs $verbargs -errpfx MASTER \
+ $m_txnargs -rep_master -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M) $recargs]
+
+ set envlist {}
+ lappend envlist "$masterenv 1"
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env_noerr -create \
+ -event rep_event $repmemargs -home $clientdir($i) \
+ $c_logargs($i) $c_txnargs($i) -rep_client $verbargs \
+ -errpfx CLIENT.$i -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i) $recargs]
+ lappend envlist "$clientenv($i) $envid"
+ }
+
+ # Process startup messages
+ process_msgs $envlist
+
+ # Run a modified test001 in the master.
+ puts "\tRep$tnum.a: Running test001 in replicated env."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+
+ # Process all the messages and close the master.
+ process_msgs $envlist
+ error_check_good masterenv_close [$masterenv close] 0
+ set envlist [lreplace $envlist 0 0]
+
+ #
+ # Make sure all clients are starting with no pending messages.
+ #
+ for { set i 0 } { $i < $nclients } { incr i } {
+ replclear [expr $i + 2]
+ }
+
+ #
+	# Run the test for all different timeout combinations.
+ #
+ set c0to { long medium }
+ set c1to { medium short }
+ set c2to { short long }
+ set numtests [expr [llength $c0to] * [llength $c1to] * \
+ [llength $c2to]]
+ set m "Rep$tnum"
+ set count 0
+ set last_win -1
+ set win -1
+ set quorum { majority all }
+ foreach q $quorum {
+ puts "\t$m.b: Starting $numtests election with\
+ timeout tests: $q must participate"
+ foreach c0 $c0to {
+ foreach c1 $c1to {
+ foreach c2 $c2to {
+ set elist [list $c0 $c1 $c2]
+ rep067_elect env_cmd envlist $qdir \
+ $m $count win last_win $elist \
+ $q $logset
+ incr count
+ }
+ }
+ }
+ }
+
+ foreach pair $envlist {
+ set cenv [lindex $pair 0]
+ error_check_good cenv_close [$cenv close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+ puts -nonewline \
+ "Rep$tnum: Completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+}
+
+proc rep067_elect { ecmd celist qdir msg count \
+ winner lsn_lose elist quorum logset} {
+ global elect_timeout elect_serial
+ global timeout_ok
+ global databases_in_memory
+ upvar $ecmd env_cmd
+ upvar $celist envlist
+ upvar $winner win
+ upvar $lsn_lose last_win
+
+ # Set the proper value for the first time through the
+ # loop. On subsequent passes, timeout_ok will already
+ # be set.
+ if { [info exists timeout_ok] == 0 } {
+ set timeout_ok 0
+ }
+
+ set nclients [llength $elist]
+ set nsites [expr $nclients + 1]
+
+ #
+ # Set long timeout to 3 minutes (180 sec).
+ # Set medium timeout to half the long timeout.
+ # Set short timeout to 10 seconds.
+ set long_timeout 180000000
+ set med_timeout [expr $long_timeout / 2]
+ set short_timeout 10000000
+ set cl_list {}
+ foreach pair $envlist {
+ set id [lindex $pair 1]
+ set i [expr $id - 2]
+ set clientenv($i) [lindex $pair 0]
+ set to [lindex $elist $i]
+ if { $to == "long" } {
+ set elect_timeout($i) $long_timeout
+ } elseif { $to == "medium" } {
+ set elect_timeout($i) $med_timeout
+ } elseif { $to == "short" } {
+ set elect_timeout($i) $short_timeout
+ }
+ set elect_pipe($i) INVALID
+ set err_cmd($i) "none"
+ replclear $id
+ lappend cl_list $i
+ }
+
+ # Select winner. We want to test biggest LSN wins, and secondarily
+ # highest priority wins. If we already have a master, make sure
+ # we don't start a client in that master.
+ set elector 0
+ if { $win == -1 } {
+ if { $last_win != -1 } {
+ set cl_list [lreplace $cl_list $last_win $last_win]
+ set elector $last_win
+ }
+ set windex [berkdb random_int 0 [expr [llength $cl_list] - 1]]
+ set win [lindex $cl_list $windex]
+ } else {
+ # Easy case, if we have a master, the winner must be the
+ # same one as last time, just use $win.
+ # If client0 is the current existing master, start the
+ # election in client 1.
+ if {$win == 0} {
+ set elector 1
+ }
+ }
+	# Winner has priority 100. If we are testing LSN winning, then
+	# make sure the lowest-LSN client has the highest priority.
+ # Everyone else has priority 10.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set crash($i) 0
+ if { $i == $win } {
+ set pri($i) 100
+ } elseif { $i == $last_win } {
+ set pri($i) 200
+ } else {
+ set pri($i) 10
+ }
+ }
+
+ puts "\t$msg.b.$count: Start election (win=client$win) $elist"
+ set msg $msg.c.$count
+ #
+ # If we want all sites, then set nsites and nvotes the same.
+	# Otherwise, we need to increase nsites to account
+ # for the master that is "down".
+ #
+ if { $quorum == "all" } {
+ set nsites $nclients
+ } else {
+ set nsites [expr $nclients + 1]
+ }
+ set nvotes $nclients
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+
+ run_election env_cmd envlist err_cmd pri crash \
+ $qdir $msg $elector $nsites $nvotes $nclients $win \
+ 0 $dbname 0 $timeout_ok
+ #
+ # Sometimes test elections with an existing master.
+ # Other times test elections without master by closing the
+ # master we just elected and creating a new client.
+ # We want to weight it to close the new master. So, use
+ # a list to cause closing about 70% of the time.
+ #
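+	# (In the list below, 7 of the 10 entries are 1, so a uniformly
+	# chosen index closes the new master with probability 7/10.)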
+ set close_list { 0 0 0 1 1 1 1 1 1 1}
+ set close_len [expr [llength $close_list] - 1]
+ set close_index [berkdb random_int 0 $close_len]
+
+ # Unless we close the master, the next election will time out.
+ set timeout_ok 1
+
+ if { [lindex $close_list $close_index] == 1 } {
+ # Declare that we expect the next election to succeed.
+ set timeout_ok 0
+ puts -nonewline "\t\t$msg: Closing "
+ error_check_good newmaster_flush [$clientenv($win) log_flush] 0
+ error_check_good newmaster_close [$clientenv($win) close] 0
+ #
+ # If the next test should win via LSN then remove the
+ # env before starting the new client so that we
+ # can guarantee this client doesn't win the next one.
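+		# (Six of the ten entries in lsn_win below are 1, so the env
+		# is also cleaned roughly 60% of the time we close the master.)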
+ set lsn_win { 0 0 0 0 1 1 1 1 1 1 }
+ set lsn_len [expr [llength $lsn_win] - 1]
+ set lsn_index [berkdb random_int 0 $lsn_len]
+ set rec_arg ""
+ set win_inmem [expr [string compare [lindex $logset \
+ [expr $win + 1]] in-memory] == 0]
+ if { [lindex $lsn_win $lsn_index] == 1 } {
+ set last_win $win
+ set dirindex [lsearch -exact $env_cmd($win) "-home"]
+ incr dirindex
+ set lsn_dir [lindex $env_cmd($win) $dirindex]
+ env_cleanup $lsn_dir
+ puts -nonewline "and cleaning "
+ } else {
+ #
+ # If we're not cleaning the env, decide if we should
+ # run recovery upon reopening the env. This causes
+ # two things:
+ # 1. Removal of region files which forces the env
+ # to read its __db.rep.egen file.
+ # 2. Adding a couple log records, so this client must
+ # be the next winner as well since it'll have the
+ # biggest LSN.
+ #
+ set rec_win { 0 0 0 0 0 0 1 1 1 1 }
+ set rec_len [expr [llength $rec_win] - 1]
+ set rec_index [berkdb random_int 0 $rec_len]
+ if { [lindex $rec_win $rec_index] == 1 } {
+ puts -nonewline "and recovering "
+ set rec_arg "-recover"
+ #
+ # If we're in memory and about to run
+ # recovery, we force ourselves not to win
+ # the next election because recovery will
+ # blow away the entire log in memory.
+ # However, we don't skip this entirely
+ # because we still want to force reading
+ # of __db.rep.egen.
+ #
+ if { $win_inmem } {
+ set last_win $win
+ } else {
+ set last_win -1
+ }
+ } else {
+ set last_win -1
+ }
+ }
+ puts "new master, new client $win"
+ set clientenv($win) [eval $env_cmd($win) $rec_arg]
+ error_check_good cl($win) [is_valid_env $clientenv($win)] TRUE
+ #
+ # Since we started a new client, we need to replace it
+ # in the message processing list so that we get the
+ # new Tcl handle name in there.
+ set newelector "$clientenv($win) [expr $win + 2]"
+ set envlist [lreplace $envlist $win $win $newelector]
+ if { $rec_arg == "" || $win_inmem } {
+ set win -1
+ }
+ #
+ # Since we started a new client we want to give them
+ # all a chance to process everything outstanding before
+ # the election on the next iteration.
+ #
+ process_msgs $envlist
+ }
+}
diff --git a/db-4.8.30/test/rep068.tcl b/db-4.8.30/test/rep068.tcl
new file mode 100644
index 0000000..3f61fbd
--- /dev/null
+++ b/db-4.8.30/test/rep068.tcl
@@ -0,0 +1,206 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2006-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep068
+# TEST Verify replication of dbreg operations does not hang clients.
+# TEST In a simple replication group, create a database with very
+# TEST little data. With DB_TXN_NOSYNC the database can be created
+# TEST at the client even though the log is not flushed. If we crash
+# TEST and restart, the application of the log starts over again, even
+# TEST though the database is still there. The application can open
+# TEST the database before replication tries to re-apply the create.
+# TEST This causes a hang as replication waits to be able to get a
+# TEST handle lock.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc rep068 { method { tnum "068" } args } {
+
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Run for btree methods only.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_btree $method] } {
+ lappend test_methods $method
+ }
+ }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: skipping for non-btree method."
+ return
+ }
+
+ # This test requires a second handle on an env, and HP-UX
+ # doesn't support that.
+ if { $is_hp_test } {
+ puts "Skipping rep$tnum for HP-UX."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with/without recovery and txn nosync.
+ foreach s {"nosync" ""} {
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { (($r == "-recover") || ($s == "nosync"))
+ && ($logindex != -1) } {
+ puts "Skipping test with -recover or\
+ nosync for in-memory logs."
+ continue
+ }
+ # Temporary note: at the moment, this test
+ # fails when both "-recover" and
+ # "nosync" are in use, because of problems
+ # described in SR #15071.
+ if { ($r == "-recover") && ($s == "nosync") } {
+					puts "Skipping test with -recover and\
+					    nosync."
+ continue
+ }
+ puts "Rep$tnum ($method $r $s): Test of\
+ dbreg lock conflicts at client $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep068_sub $method $tnum $l $r $s $args
+ }
+ }
+ }
+}
+
+proc rep068_sub { method tnum logset recargs nosync largs } {
+ global testdir
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set KEY "any old key"
+ set DATA "arbitrary data"
+ set DBNAME "test.db"
+
+ set nosync_args [subst {-txn $nosync}]
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync. Adjust the args for master
+	# and client.
+ # There is no need to adjust txn args for this test since
+ # the txn args are explicitly set.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_logargs \
+ $verbargs -errpfx MASTER $repmemargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs $nosync_args -rep_master]
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_logargs \
+ $verbargs -errpfx CLIENT $repmemargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs $nosync_args -rep_client]
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+	# Open/create a database, put just one record in it,
+	# abandon the client env, and restart it.  Before trying to sync,
+ # open the database at the client.
+
+ set db [berkdb_open_noerr -auto_commit \
+ -btree -create -env $masterenv $DBNAME]
+ set ret [$db put $KEY $DATA]
+ error_check_good initial_insert $ret 0
+ process_msgs $envlist
+
+ # Simulate a crash and restart of the client, by simply abandoning
+ # the old environment handle and opening a new one.
+ #
+ puts "\tRep$tnum.a: Open a fresh handle onto the client env."
+ set origclientenv $clientenv
+ set clientenv [eval $cl_envcmd $recargs $nosync_args -rep_client]
+ set envlist "{$masterenv 1} {$clientenv 2}"
+
+ # We expect the db creation operation to have been flushed to the log,
+ # so that at this point recovery will have removed the database (since
+ # we expect the transaction did not commit). But the bug we are testing
+ # for is that the applying of replicated transactions hangs if the
+ # database turns out to be present. Thus, for a stringent test, we want
+ # to at least try to open the database, and "dare ourselves" not to hang
+ # if it turns out to be present.
+ #
+ if {[catch {set client_db [berkdb_open_noerr \
+ -auto_commit -unknown -env $clientenv $DBNAME]} result] == 0} {
+ puts "\t\tRep$tnum.a(ii): warning: db open at restarted client\
+ succeeded unexpectedly"
+ } else {
+ set client_db "NULL"
+ }
+
+ puts "\tRep$tnum.b: Attempting sync-up with db handle open."
+ process_msgs $envlist
+ puts "\tRep$tnum.c: Sync-up completed."
+
+ if {$client_db == "NULL"} {
+ set client_db [berkdb_open_noerr \
+ -auto_commit -unknown -env $clientenv $DBNAME]
+ }
+ set result [$client_db get $KEY]
+ error_check_good one_pair [llength $result] 1
+ set val [lindex $result 0 1]
+ error_check_good "value still matches" $val $DATA
+ puts "\tRep$tnum.d: Confirmed correct data."
+
+ $client_db close
+ $clientenv close
+ catch { $origclientenv close } res
+
+ $db close
+ $masterenv close
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep069.tcl b/db-4.8.30/test/rep069.tcl
new file mode 100644
index 0000000..15a998c
--- /dev/null
+++ b/db-4.8.30/test/rep069.tcl
@@ -0,0 +1,295 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep069
+# TEST Test of internal initialization and elections.
+# TEST
+# TEST If a client is in a recovery mode of any kind, it
+# TEST participates in elections at priority 0 so it can
+# TEST never be elected master.
+#
+proc rep069 { method { niter 200 } { tnum "069" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: Skipping for method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set saved_args $args
+
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ foreach l $logsets {
+ set args $saved_args
+ puts "Rep$tnum ($method $args): Test internal\
+ initialization and elections $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep069_sub $method $niter $tnum $l $args
+ }
+}
+
+proc rep069_sub { method niter tnum logset largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+ global timeout_ok
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ set nclients 2
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ # Log size is small so we quickly create more than one, and
+ # can easily force internal initialization.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ set envlist {}
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ $repmemargs \
+ $m_logargs -log_max $log_max -event rep_event $verbargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd -recover -rep_master]
+ lappend envlist "$masterenv 1"
+
+ # Open clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set envcmd($i) "berkdb_env_noerr -create \
+ $repmemargs \
+ $c_txnargs $c_logargs -log_max $log_max \
+ -home $clientdir($i) -event rep_event $verbargs \
+ -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $envcmd($i) -recover -rep_client]
+ lappend envlist "$clientenv($i) $envid"
+ }
+
+ # Bring the clients online by processing the startup messages.
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # db_archive in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ # Run rep_test in the master and update clients.
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set start 0
+ eval rep_test \
+ $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist 0 NONE err
+ error_check_good process_msgs $err 0
+
+ # Find out what exists on the client. We need to loop until
+ # the first master log file > last client log file. The two
+ # clients should be the same, so just inspect one.
+ puts "\tRep$tnum.b: Close clients."
+ if { $c_logtype != "in-memory" } {
+ set res [eval exec $util_path/db_archive -l -h $clientdir(0)]
+ set res [eval exec $util_path/db_archive -l -h $clientdir(1)]
+ }
+ set last_client_log [get_logfile $clientenv(1) last]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good client_close [$clientenv($i) close] 0
+ }
+ set envlist [lreplace $envlist 1 2]
+
+ # Run the master forward.
+ set stop 0
+ while { $stop == 0 } {
+ puts "\tRep$tnum.c: Running rep_test in replicated env."
+ eval rep_test \
+ $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+
+ puts "\tRep$tnum.d: Run db_archive on master."
+ if { $m_logtype != "in-memory"} {
+ set res [eval \
+ exec $util_path/db_archive -d -h $masterdir]
+ }
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > $last_client_log } {
+ set stop 1
+ }
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ replclear $envid
+ }
+
+ # Reopen clients.
+ puts "\tRep$tnum.e: Reopen clients."
+ for { set i 0 } { $i < $nclients } { incr i } {
+ env_cleanup $clientdir($i)
+ set clientenv($i) [eval $envcmd($i) -recover -rep_client]
+ set envid [expr $i + 2]
+ lappend envlist "$clientenv($i) $envid"
+ }
+
+ # Run proc_msgs_once until both clients are in internal
+ # initialization.
+ #
+ # We figure out whether each client is in initialization
+ # by searching for any of the flags REP_F_RECOVER_UPDATE,
+ # REP_F_RECOVER_PAGE, and REP_F_RECOVER_LOG. As soon as
+ # a client produces one of these, it's marked as being
+ # in initialization, and stays that way even if it proceeds
+ # further, but we don't exit the loop until all clients
+ # have gotten into initialization.
+ #
+ puts "\tRep$tnum.f:\
+ Run proc_msgs_once until all clients enter internal init."
+ set in_init 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set initializing($i) 0
+ }
+
+ while { $in_init != 1 } {
+ set nproced [proc_msgs_once $envlist NONE err]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set stat($i) \
+			    [exec $util_path/db_stat -r -R A -h $clientdir($i)]
+ if {[is_substr $stat($i) "REP_F_RECOVER_UPDATE"] } {
+ set initializing($i) 1
+ }
+ if {[is_substr $stat($i) "REP_F_RECOVER_PAGE"] } {
+ set initializing($i) 1
+ }
+ if {[is_substr $stat($i) "REP_F_RECOVER_LOG"] } {
+ set initializing($i) 1
+ }
+ }
+ set in_init 1
+ for { set i 0 } { $i < $nclients } { incr i } {
+ if { $initializing($i) == 0 } {
+ set in_init 0
+ }
+ }
+ }
+
+ # Call an election. It should fail, because both clients
+ # are in internal initialization and therefore not electable.
+ # Indicate failure with winner = -2.
+ # First, close the master.
+ error_check_good masterenv_close [$masterenv close] 0
+ set envlist [lreplace $envlist 0 0]
+
+ puts "\tRep$tnum.g: Run election; no one will get elected."
+ set m "Rep$tnum.g"
+ set nsites $nclients
+ set nvotes $nclients
+ set winner -2
+ set elector 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set err_cmd($i) "none"
+ set crash($i) 0
+ set pri($i) 10
+ }
+
+ # This election will time out instead of succeeding.
+ set timeout_ok 1
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+ run_election envcmd envlist err_cmd pri crash \
+ $qdir $m $elector $nsites $nvotes $nclients $winner \
+ 0 $dbname 0 $timeout_ok
+
+ # Verify that each client saw the message that no
+ # electable site was found.
+ puts "\tRep$tnum.h: Check for right error message."
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set none_electable 0
+ set id [expr $i + 1]
+ set fd [open $testdir/ELECTION_RESULT.$id r]
+ while { [gets $fd str] != -1 } {
+ if { [is_substr $str "Unable to elect a master"] == 1 } {
+ set none_electable 1
+ break
+ }
+ }
+ close $fd
+ error_check_good none_electable $none_electable 1
+ }
+
+ # Clean up for the next pass.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ $clientenv($i) close
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
diff --git a/db-4.8.30/test/rep070.tcl b/db-4.8.30/test/rep070.tcl
new file mode 100644
index 0000000..145c906
--- /dev/null
+++ b/db-4.8.30/test/rep070.tcl
@@ -0,0 +1,181 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep070
+# TEST Test of startup_done condition with idle master.
+# TEST
+# TEST Join a client to an existing master, and verify that
+# TEST the client detects startup_done even if the master
+# TEST does not execute any new transactions.
+#
+proc rep070 { method { niter 200 } { tnum "070" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ # Run for btree and queue only.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_btree $method] == 1 ||\
+ [is_queue $method] == 1 } {
+ lappend test_methods $method
+ }
+ }
+ return $test_methods
+ }
+ if { [is_btree $method] != 1 && [is_queue $method] != 1 } {
+ puts "Skipping rep070 for method $method."
+ return
+ }
+
+ # This test does not cover any new ground with in-memory
+ # databases.
+ if { $databases_in_memory } {
+ puts "Skipping Rep$tnum for in-memory databases."
+ return
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+	# Run the body of the test with and without recovery.
+	# Skip recovery with in-memory logging - it doesn't make sense.
+ #
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r $args): Test of\
+ internal initialization and startup_done $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep070_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep070_sub { method niter tnum logset recargs largs } {
+ source ./include.tcl
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ $m_logargs $verbargs -errpfx MASTER $repmemargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Put some data into the database
+ puts "\tRep$tnum.a: Run rep_test in master env."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+
+ # Open a client
+ puts "\tRep$tnum.b: Open client."
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $c_logargs $verbargs -errpfx CLIENT $repmemargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ rep070_verify_startup_done $clientenv $envlist
+
+ # Close and re-open the client. What happens next depends on whether
+ # we used -recover.
+ #
+ $clientenv close
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ if { $recargs == "-recover" } {
+ rep070_verify_startup_done $clientenv $envlist
+ } else {
+ error_check_good \
+ startup_still_done [rep070_startup_done $clientenv] 1
+ }
+
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1
+
+ check_log_location $masterenv
+ check_log_location $clientenv
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
+# Verify that startup_done starts off false, then turns to true at some point,
+# and thereafter never reverts to false.
+#
+proc rep070_verify_startup_done { clientenv envlist } {
+ # Initially we should not yet have startup_done.
+ set got_startup_done [rep070_startup_done $clientenv]
+ error_check_good startup_not_done_yet $got_startup_done 0
+
+ # Bring the client online little by little.
+ #
+ while { [proc_msgs_once $envlist] > 0 } {
+ set done [rep070_startup_done $clientenv]
+
+ # At some point, startup_done gets turned on. Make sure it
+ # never gets turned off after that.
+ #
+ if { $got_startup_done } {
+ # We've seen startup_done previously.
+ error_check_good no_rescind $done 1
+ } else {
+ set got_startup_done $done
+ }
+ }
+ error_check_good startup_done $got_startup_done 1
+}
+
+proc rep070_startup_done { env } {
+ stat_field $env rep_stat "Startup complete"
+}
diff --git a/db-4.8.30/test/rep071.tcl b/db-4.8.30/test/rep071.tcl
new file mode 100644
index 0000000..4e53fe3
--- /dev/null
+++ b/db-4.8.30/test/rep071.tcl
@@ -0,0 +1,166 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep071
+# TEST Test of multiple simultaneous client env handles and
+# TEST upgrading/downgrading. Tests use of temp db handle
+# TEST internally.
+# TEST
+# TEST Open a master and 2 handles to the same client env.
+# TEST Run rep_test.
+# TEST Close master and upgrade client to master using one env handle.
+# TEST Run rep_test again, and then downgrade back to client.
+#
+proc rep071 { method { niter 10 } { tnum "071" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for btree only.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: Skipping for method $method."
+ return
+ }
+
+ # We can't open two envs on HP-UX, so just skip the
+ # whole test since that is at the core of it.
+ if { $is_hp_test == 1 } {
+ puts "Rep$tnum: Skipping for HP-UX."
+ return
+ }
+
+ # This test depends on copying logs, so can't be run with
+ # in-memory logging.
+ global mixed_mode_logging
+ if { $mixed_mode_logging > 0 } {
+ puts "Rep$tnum: Skipping for mixed-mode logging."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run the body of the test with and without recovery.
+ foreach r $test_recopts {
+		puts "Rep$tnum ($method $r): Replication with multiple\
+		    client env handles and upgrade/downgrade $msg $msg2."
+ rep071_sub $method $niter $tnum $r $args
+ }
+}
+
+proc rep071_sub { method niter tnum recargs largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create -txn nosync $verbargs \
+ -home $masterdir -errpfx MASTER $repmemargs \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create -txn nosync $verbargs \
+ -home $clientdir -errpfx CLIENT $repmemargs \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good clenv [is_valid_env $clientenv] TRUE
+ #
+ # Open a 2nd client handle to the same client env.
+ # This handle needs to be a full client handle so just
+ # use the same env command for both.
+ #
+ set 2ndclientenv [eval $cl_envcmd -rep_client -errpfx 2ND]
+ error_check_good cl2env [is_valid_env $2ndclientenv] TRUE
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Run a modified test001 in the master (and update client).
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Downgrade master and upgrade client."
+	error_check_good master_downgrade [$masterenv rep_start -client] 0
+	error_check_good client_upgrade [$clientenv rep_start -master] 0
+
+ puts "\tRep$tnum.b: Run rep_test."
+ eval rep_test $method $clientenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ puts "\tRep$tnum.c: Downgrade back to client and upgrade master"
+ #
+ # The act of upgrading and downgrading an env, with another
+ # handle open had issues with open internal db handles.
+ # So, the existence of the 2nd client env handle is needed
+ # even though we're not doing anything active with that handle.
+ #
+	error_check_good client_downgrade [$clientenv rep_start -client] 0
+	error_check_good master_upgrade [$masterenv rep_start -master] 0
+
+ puts "\tRep$tnum.d: Run rep_test in master."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ rep_verify $masterdir $masterenv $clientdir $clientenv
+
+ error_check_good master_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+	error_check_good 2ndclientenv_close [$2ndclientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep072.tcl b/db-4.8.30/test/rep072.tcl
new file mode 100644
index 0000000..bde2701
--- /dev/null
+++ b/db-4.8.30/test/rep072.tcl
@@ -0,0 +1,211 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep072
+# TEST Verify that internal init does not leak resources from
+# TEST the locking subsystem.
+
+proc rep072 { method { niter 200 } { tnum "072" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Run for btree and queue methods only.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_btree $method] == 1 || \
+ [is_queue $method] == 1 } {
+ lappend test_methods $method
+ }
+ }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 && [is_queue $method] == 0 } {
+ puts "Rep$tnum: skipping for non-btree, non-queue method."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set limit 3
+ set check true
+
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ foreach l $logsets {
+ puts "Rep$tnum ($method): Confirm internal init does not\
+ leak locks $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep072_sub $method $niter $tnum $l $limit $check $args
+ }
+}
+
+proc rep072_sub {method {niter 200} {tnum 072} logset \
+ {limit 3} {check true} largs} {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync. Adjust the args for master
+ # and client.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $verbargs $repmemargs \
+ $m_logargs $m_txnargs -log_max $log_max -errpfx MASTER \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd -rep_master]
+ $masterenv rep_limit 0 0
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $verbargs $repmemargs \
+ $c_logargs $c_txnargs -log_max $log_max -errpfx CLIENT \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd -rep_client]
+ $clientenv rep_limit 0 0
+
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Clobber replication's 30-second anti-archive timer, which will have
+ # been started by client sync-up internal init, so that we can do a
+ # log_archive in a moment.
+ #
+ $masterenv test force noarchive_timeout
+
+ # $limit is the number of internal init cycles we want to try
+ for {set count 1} {$count <= $limit} {incr count} {
+ puts "\tRep$tnum.a: Try internal init cycle number $count"
+
+ # Run rep_test in the master.
+ puts "\tRep$tnum.b: Running rep_test in replicated env."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ puts "\tRep$tnum.c: Leave client alive, but isolated."
+
+ if { $c_logtype != "in-memory" } {
+ set res [exec $util_path/db_archive -l -h $clientdir]
+ }
+ set last_client_log [get_logfile $clientenv last]
+
+ set stop 0
+ while { $stop == 0 } {
+ # Run rep_test in the master (don't update client).
+ puts "\tRep$tnum.d: Running rep_test in replicated env."
+ eval rep_test \
+ $method $masterenv NULL $niter 0 0 0 $largs
+ #
+ # Clear messages for client. We want that site
+ # to get far behind.
+ #
+ replclear 2
+ if { $m_logtype != "in-memory" } {
+ puts "\tRep$tnum.e: Run db_archive on master."
+ exec $util_path/db_archive -d -h $masterdir
+ set res [exec $util_path/db_archive -l \
+ -h $masterdir]
+ }
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > $last_client_log } {
+ set stop 1
+ }
+ }
+
+ #
+ # Run rep_test one more time, this time letting client see
+ # messages. This will induce client to ask master for missing
+ # log records, leading to internal init.
+ #
+ puts "\tRep$tnum.f: Running rep_test in replicated env."
+ set entries 10
+ eval rep_test $method \
+ $masterenv NULL $entries $niter 0 0 $largs
+ process_msgs $envlist
+
+ set n_lockers [stat_field \
+ $clientenv lock_stat "Current number of lockers"]
+ puts "\tRep$tnum.f: num lockers: $n_lockers"
+ if {$count == 1} {
+ set expected_lockers $n_lockers
+ } elseif {[string is true $check]} {
+ error_check_good leaking? $n_lockers $expected_lockers
+ }
+
+ if {$count < $limit} {
+ # Wait for replication "no-archive" timeout to expire
+ #
+ puts "\tRep$tnum.g: Sleep for 32 seconds"
+ tclsleep 32
+ }
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep073.tcl b/db-4.8.30/test/rep073.tcl
new file mode 100644
index 0000000..b075ad5
--- /dev/null
+++ b/db-4.8.30/test/rep073.tcl
@@ -0,0 +1,193 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep073
+# TEST
+# TEST Test of allowing clients to create and update their own scratch
+# TEST databases within the environment. Doing so requires the use
+# TEST of the DB_TXN_NOT_DURABLE flag for those databases.
+#
+proc rep073 { method { niter 200 } { tnum "073" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+	# Run the body of the test with and without recovery.
+	# Skip recovery with in-memory logging - it doesn't make sense.
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ puts "Rep$tnum ($method $r $args): Test of\
+ non-durable databases and replication $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep073_sub $method $niter $tnum $l $r $args
+ }
+ }
+}
+
+proc rep073_sub { method niter tnum logset recargs largs } {
+ source ./include.tcl
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set omethod [convert_method $method]
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $m_logargs \
+ -errpfx MASTER $repmemargs \
+ -home $masterdir $verbargs -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd $recargs -rep_master]
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $c_logargs \
+ -errpfx CLIENT $repmemargs \
+ -home $clientdir $verbargs -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd $recargs -rep_client]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ # Run rep_test in the master (and update client).
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+
+ # Set up databases as in-memory or on-disk.
+ if { $databases_in_memory } {
+ set mtestfile { "" "master.db" }
+ set ctestfile { "" "client.db" }
+ } else {
+ set mtestfile "master.db"
+ set ctestfile "client.db"
+ }
+
+ puts "\tRep$tnum.b: Open non-durable databases on master and client."
+ set mdb [berkdb_open -create -auto_commit \
+ -btree -env $masterenv -notdurable $mtestfile]
+ set cdb [berkdb_open -create -auto_commit \
+ -btree -env $clientenv -notdurable $ctestfile]
+ process_msgs $envlist
+
+ # Verify that neither file exists on the other site.
+ # Look for the file if it's on-disk, and try to open a handle
+ # if it's in-memory.
+ if { $databases_in_memory } {
+ catch { berkdb_open -env $clientenv $mtestfile } ret
+ error_check_good mtestfile [is_substr $ret "no such file"] 1
+ catch { berkdb_open -env $masterenv $ctestfile } ret
+ error_check_good ctestfile [is_substr $ret "no such file"] 1
+ } else {
+ error_check_good master_not_on_client \
+ [file exists $clientdir/$mtestfile] 0
+ error_check_good client_not_on_master \
+ [file exists $masterdir/$ctestfile] 0
+ }
+
+ #
+ # Now write to the master database, process messages and
+ # make sure nothing gets sent to the client.
+ #
+ puts "\tRep$tnum.c: Write to non-durable database on master."
+ eval rep_test $method $masterenv $mdb $niter $start $start 0 $largs
+ incr start $niter
+ process_msgs $envlist
+ if { $databases_in_memory } {
+ catch { berkdb_open -env $clientenv $mtestfile } ret
+ error_check_good mtestfile [is_substr $ret "no such file"] 1
+ } else {
+ error_check_good master_not_on_client \
+ [file exists $clientdir/$mtestfile] 0
+ }
+
+ # Make sure client can write to its own database.
+ puts "\tRep$tnum.d: Write to non-durable database on client."
+ eval rep_test $method $clientenv $cdb $niter $start $start 0 $largs
+ process_msgs $envlist
+
+ error_check_good mdb_close [$mdb close] 0
+ error_check_good cdb_close [$cdb close] 0
+
+ rep_verify $masterdir $masterenv $clientdir $clientenv 0 1 1
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep074.tcl b/db-4.8.30/test/rep074.tcl
new file mode 100644
index 0000000..5026b1b
--- /dev/null
+++ b/db-4.8.30/test/rep074.tcl
@@ -0,0 +1,197 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep074
+# TEST Verify replication withstands send errors processing requests.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc rep074 { method { niter 20 } { tnum "074" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ foreach l $logsets {
+ puts "Rep$tnum ($method): Test of send errors processing\
+ requests $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep074_sub $method $niter $tnum $l $args
+ }
+}
+
+proc rep074_sub { method niter tnum logset largs } {
+ global testdir
+ global rep074_failure_count
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set rep074_failure_count -1
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync. Adjust the args for master
+ # and client.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $verbargs -errpfx MASTER \
+ -home $masterdir $m_logargs $m_txnargs $repmemargs \
+ -rep_transport \[list 1 rep074_replsend\]"
+ set masterenv [eval $ma_envcmd -rep_master]
+
+ # Create some new records, so that the master will have something
+ # substantial to say when asked for LOG_REQ.
+ #
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+
+ # Open a client
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $verbargs -errpfx CLIENT \
+ -home $clientdir $c_logargs $c_txnargs $repmemargs \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd -rep_client]
+ set envlist "{$masterenv 1} {$clientenv 2}"
+
+ # Bring the client online by processing the startup messages. This will
+ # cause the client to send a request to the master.
+ #
+ # In the first cycle, the client gets NEWMASTER and sends an UPDATE_REQ.
+ # In the second cycle, the master answers the UPDATE_REQ with an UPDATE,
+ # and the client sends a PAGE_REQ. Third, once we've gotten pages, we
+ # send a LOG_REQ.
+ #
+ # 1. NEWCLIENT -> NEWMASTER -> UPDATE_REQ
+ # 2. UPDATE -> PAGE_REQ
+ # 3. PAGE -> LOG_REQ
+ #
+ puts "\tRep$tnum.b: NEWMASTER -> UPDATE_REQ"
+ proc_msgs_once $envlist
+ puts "\tRep$tnum.c: UPDATE -> PAGE_REQ"
+ proc_msgs_once $envlist
+ puts "\tRep$tnum.d: PAGE -> LOG_REQ"
+ proc_msgs_once $envlist
+
+ # Force a sending error at the master while processing the LOG_REQ.
+ # We should ignore it, and return success to rep_process_message
+ #
+ puts "\tRep$tnum.e: Simulate a send error."
+ set rep074_failure_count [expr $niter / 2]
+ proc_msgs_once $envlist NONE errorp
+
+ puts "\tRep$tnum.f: Check for good return from rep_process_msg."
+ error_check_good rep_resilient $errorp 0
+
+ # Since we interrupted the flow with the simulated error, we don't have
+ # the log records we need yet.
+ #
+ error_check_bad startupdone \
+ [stat_field $clientenv rep_stat "Startup complete"] 1
+
+ #
+	# Run some more new txns at the master, so that the client eventually
+	# decides to request the remainder of the LOG_REQ response that it's
+	# missing.  Force a checkpoint to create a gap, then pause for a
+	# second so that even fast machines reach the re-request threshold.
+ #
+ set rep074_failure_count -1
+ $masterenv txn_checkpoint -force
+ process_msgs $envlist
+ tclsleep 1
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ error_check_good startupdone \
+ [stat_field $clientenv rep_stat "Startup complete"] 1
+
+ $masterenv close
+ $clientenv close
+ replclose $testdir/MSGQUEUEDIR
+}
+
+# A failure count < 0 turns off any special failure simulation processing.
+# When the count is > 0, we process that many messages normally before
+# invoking a failure; once the count reaches 0, the send reports an error.
+#
+proc rep074_replsend { control rec fromid toid flags lsn } {
+ global rep074_failure_count
+
+ if { $rep074_failure_count < 0 } {
+ return [replsend $control $rec $fromid $toid $flags $lsn]
+ }
+
+ if { $rep074_failure_count > 0 } {
+ incr rep074_failure_count -1
+ return [replsend $control $rec $fromid $toid $flags $lsn]
+ }
+
+ # Return an arbitrary non-zero value to indicate an error.
+ return 1
+}
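+
+# Usage sketch (mirroring the calls in rep074_sub above, for reference only):
+# the callback is registered with -rep_transport [list 1 rep074_replsend],
+# and a single failing send is armed for one message-processing pass with:
+#	set rep074_failure_count [expr $niter / 2]
+#	proc_msgs_once $envlist NONE errorp
+# after which errorp is still expected to be 0, because the master ignores
+# the send error while processing the LOG_REQ.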
diff --git a/db-4.8.30/test/rep075.tcl b/db-4.8.30/test/rep075.tcl
new file mode 100644
index 0000000..d9119d3
--- /dev/null
+++ b/db-4.8.30/test/rep075.tcl
@@ -0,0 +1,551 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep075
+# TEST Replication and prepared transactions.
+# TEST Test having outstanding prepared transactions and simulating
+# TEST crashing or upgrading or downgrading sites.
+# TEST
+#
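+# For illustration only -- a minimal sketch (not executed by this test) of
+# the two halves that rep075_sub exercises in many combinations below:
+#
+#   1. Prepare a transaction under a global ID on the master:
+#	set txn [$env txn]
+#	$db put -txn $txn $key $data
+#	$txn prepare [make_gid rep075:$txn]
+#
+#   2. After a simulated crash and reopen with recovery, retrieve the
+#      outstanding prepared transactions and resolve each one:
+#	foreach trec [$env txn_recover] {
+#		set t [lindex $trec 0]
+#		$t commit	;# or: $t abort
+#	}
+#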
+proc rep075 { method { tnum "075" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global mixed_mode_logging
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+	# Run for btree only.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep075: Skipping for method $method"
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # Swapping the envs is the only thing that should
+ # work for:
+ # HP, old Windows: can't open two handles on same env.
+ # in-memory logs: prepared txns don't survive recovery
+ # NIM databases: can't be recovered
+ #
+ if { $is_hp_test == 1 || $is_windows9x_test == 1 ||
+ $mixed_mode_logging > 0 || $databases_in_memory == 1 } {
+ set prep {swap}
+ } else {
+ set prep {dbrecover swap resolve recover envrecover}
+ }
+ set ops {commit abort both}
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+
+ }
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+	# Run the body of the test for each log configuration, reset
+	# method, and resolution operation.
+ foreach l $logsets {
+ foreach p $prep {
+ foreach o $ops {
+ puts "Rep$tnum ($method $p $o):\
+ Replication and prepared txns $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ puts "Rep$tnum: close DBs after prepare"
+ rep075_sub $method $tnum $l $p $o 1 $args
+ puts "Rep$tnum: close DBs before prepare"
+ rep075_sub $method $tnum $l $p $o 0 $args
+ }
+ }
+ }
+}
+
+proc rep075_sub { method tnum logset prep op after largs } {
+ global testdir
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+ global util_path
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_buf [expr $pagesize * 2]
+ set log_max [expr $log_buf * 4]
+ set m_logargs " -log_buffer $log_buf "
+ set c_logargs " -log_buffer $log_buf "
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs \
+ $repmemargs \
+ $m_logargs -errpfx ENV0 -log_max $log_max $verbargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set env0 [eval $ma_envcmd -rep_master]
+ set masterenv $env0
+ error_check_good master_env [is_valid_env $env0] TRUE
+
+ # Open a client.
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $repmemargs \
+ $c_logargs -errpfx ENV1 -log_max $log_max $verbargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set env1 [eval $cl_envcmd -rep_client]
+ set clientenv $env1
+ error_check_good client_env [is_valid_env $env1] TRUE
+
+ repladd 3
+ set cl2_envcmd "berkdb_env_noerr -create $c_txnargs \
+ $repmemargs \
+ $c_logargs -errpfx ENV2 -log_max $log_max $verbargs \
+ -home $clientdir2 -rep_transport \[list 3 replsend\]"
+ set env2 [eval $cl2_envcmd -rep_client]
+ set clientenv2 $env2
+ error_check_good client_env [is_valid_env $env2] TRUE
+
+ set omethod [convert_method $method]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$env0 1} {$env1 2} {$env2 3}"
+ process_msgs $envlist
+
+ #
+	# Run rep_test in a subdatabase of an on-disk database, or in a
+	# named in-memory database.
+ #
+ if { $databases_in_memory } {
+ set testfile { "" "test1.db" }
+ set testfile2 { "" "test2.db" }
+ set db1 [eval {berkdb_open_noerr -env $masterenv -auto_commit \
+ -create -mode 0644} $largs $omethod $testfile]
+ } else {
+ set testfile "test1.db"
+ set testfile2 "test2.db"
+ set sub "subdb"
+ set db1 [eval {berkdb_open_noerr -env $masterenv -auto_commit \
+ -create -mode 0644} $largs $omethod $testfile $sub]
+ }
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set niter 1
+ eval rep_test $method $masterenv $db1 $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ set db [eval {berkdb_open_noerr -env $masterenv -auto_commit \
+ -create -mode 0644} $largs $omethod $testfile2]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ #
+ # Create and prepare 2 transactions:
+	# One txn is for the first database and one for the second.
+	# We want to test that we can detect when the last restored
+	# txn has been resolved, and to test having various files open.
+ #
+ puts "\tRep$tnum.b: Prepare some txns."
+ set pbnyc 2
+ set key key
+ set data some_data
+ set txn1 [$masterenv txn]
+ error_check_good txn [is_valid_txn $txn1 $masterenv] TRUE
+ error_check_good put [$db1 put -txn $txn1 $key $data] 0
+
+ set gid [make_gid rep075:$txn1]
+	error_check_good prepare [$txn1 prepare $gid] 0
+
+ set txn2 [$masterenv txn]
+ error_check_good txn [is_valid_txn $txn2 $masterenv] TRUE
+ error_check_good put [$db put -txn $txn2 $key $data] 0
+
+ set gid [make_gid rep075:$txn2]
+	error_check_good prepare [$txn2 prepare $gid] 0
+ if { $after == 0 } {
+ $db1 close
+ $db close
+ }
+ process_msgs $envlist
+
+ #
+ # Now we have txns on a master that are PBNYC (prepared but
+	# not yet committed).  Alter the replication system
+	# based on what we're testing this time through.
+ #
+ puts "\tRep$tnum.c: Reset replication ($prep)."
+
+ if { $op == "commit" } {
+ set op1 commit
+ set op2 commit
+ } elseif { $op == "abort" } {
+ set op1 abort
+ set op2 abort
+ } else {
+ set i [berkdb random_int 0 1]
+ if { $i == 0 } {
+ set op1 commit
+ set op2 abort
+ } else {
+ set op1 abort
+ set op2 commit
+ }
+ }
+ set oplist [list $op1 $op2]
+ #
+ # If we are doing a swap, swap roles between master and client
+	# and then call txn_recover.  Master should then commit.
+ # This operation tests handling prepared txns in replication code.
+ #
+ # If we are doing a recover, each site stops using its old
+ # env handle and then opens a new one, with recovery.
+ # This operation tests handling prepared txns and then
+ # starting replication.
+ #
+ # If we are doing an envrecover, each site stops using its old
+ # env handle and then opens a new one, with recovery.
+ # Each site then opens a 2nd dbenv handle to run txn_recover
+ # and resolve each operation.
+ # This operation tests handling prepared txns and then
+ # starting replication.
+ #
+ # If we are doing a resolve, each site prepares the txns
+ # and then resolves the txns and then stops using the old
+ # env handle to cause a "crash". We then open a new one
+ # with recovery. This operation tests handling prepared
+ # txns and having them resolved.
+ #
+ if { $prep == "swap" } {
+ puts "\tRep$tnum.c.0: Swap roles master->client."
+ #
+ # A downgrading master must resolve the txns. So, commit
+ # them here, but don't send the messages to the client that
+ # is about to become master.
+ #
+ error_check_good commit [$txn1 commit] 0
+ error_check_good commit [$txn2 commit] 0
+ if { $after == 1 } {
+ $db1 close
+ $db close
+ }
+ replclear 2
+ replclear 3
+ set newclient $env0
+ error_check_good downgrade [$newclient rep_start -client] 0
+ set ctxnlist [$newclient txn_recover]
+ set newmaster $env1
+ puts "\tRep$tnum.c.1: Swap roles client->master."
+ error_check_good upgrade [$newmaster rep_start -master] 0
+ set txnlist [$newmaster txn_recover]
+
+ puts "\tRep$tnum.c.2: Check status of prepared txn."
+ error_check_good txnlist_len [llength $txnlist] $pbnyc
+ error_check_good txnlist_len [llength $ctxnlist] 0
+
+ #
+ # Now commit that old prepared txn.
+ #
+ puts "\tRep$tnum.c.3: Resolve prepared txn ($op)."
+ rep075_resolve $txnlist $oplist
+ } elseif { $prep == "recover" } {
+ #
+ # To simulate a crash, simply stop using the old handles
+ # and reopen new ones, with recovery. First flush both
+ # the log and mpool to disk.
+ #
+ set origenv0 $env0
+ set origenv1 $env1
+ set origtxn1 $txn1
+ set origtxn2 $txn2
+ puts "\tRep$tnum.c.0: Sync and recover master environment."
+ error_check_good flush1 [$env0 log_flush] 0
+ error_check_good sync1 [$env0 mpool_sync] 0
+ if { $after == 1 } {
+ $db1 close
+ $db close
+ }
+ set env0 [eval $ma_envcmd -recover]
+ error_check_good master_env [is_valid_env $env0] TRUE
+ puts "\tRep$tnum.c.1: Run txn_recover on master env."
+ set txnlist [$env0 txn_recover]
+ error_check_good txnlist_len [llength $txnlist] $pbnyc
+ puts "\tRep$tnum.c.2: Resolve txn ($op) on master env."
+ rep075_resolve $txnlist $oplist
+
+ puts "\tRep$tnum.c.3: Sync and recover client environment."
+ error_check_good flush1 [$env1 log_flush] 0
+ error_check_good sync1 [$env1 mpool_sync] 0
+ set env1 [eval $cl_envcmd -recover]
+ error_check_good client_env [is_valid_env $env1] TRUE
+ puts "\tRep$tnum.c.4: Run txn_recover on client env."
+ set txnlist [$env1 txn_recover]
+ error_check_good txnlist_len [llength $txnlist] $pbnyc
+
+ puts "\tRep$tnum.c.5: Resolve txn ($op) on client env."
+ rep075_resolve $txnlist $oplist
+
+ puts "\tRep$tnum.c.6: Restart replication on both envs."
+ error_check_good master [$env0 rep_start -master] 0
+ error_check_good client [$env1 rep_start -client] 0
+ set newmaster $env0
+ set envlist "{$env0 1} {$env1 2} {$env2 3}"
+ #
+ # Clean up old Tcl handles.
+ #
+ catch {$origenv0 close} res
+ catch {$origenv1 close} res
+ catch {$origtxn1 close} res
+ catch {$origtxn2 close} res
+ } elseif { $prep == "resolve" } {
+ #
+ # Check having prepared txns in the log, but they are
+ # also resolved before we "crash".
+ # To simulate a crash, simply stop using the old handles
+ # and reopen new ones, with recovery. First flush both
+ # the log and mpool to disk.
+ #
+ set origenv0 $env0
+ set origenv1 $env1
+ set origdb1 $db1
+ set origdb $db
+ puts "\tRep$tnum.c.0: Resolve ($op1 $op2) and recover master."
+ error_check_good resolve1 [$txn1 $op1] 0
+ error_check_good resolve2 [$txn2 $op2] 0
+ error_check_good flush0 [$env0 log_flush] 0
+ error_check_good sync0 [$env0 mpool_sync] 0
+ process_msgs $envlist
+ set env0 [eval $ma_envcmd -recover]
+ error_check_good master_env [is_valid_env $env0] TRUE
+ puts "\tRep$tnum.c.1: Run txn_recover on master env."
+ set txnlist [$env0 txn_recover]
+ error_check_good txnlist_len [llength $txnlist] 0
+
+ puts "\tRep$tnum.c.2: Sync and recover client environment."
+ error_check_good flush1 [$env1 log_flush] 0
+ error_check_good sync1 [$env1 mpool_sync] 0
+ set env1 [eval $cl_envcmd -recover]
+ error_check_good client_env [is_valid_env $env1] TRUE
+ puts "\tRep$tnum.c.3: Run txn_recover on client env."
+ set txnlist [$env1 txn_recover]
+ error_check_good txnlist_len [llength $txnlist] 0
+
+ puts "\tRep$tnum.c.4: Restart replication on both envs."
+ error_check_good master [$env0 rep_start -master] 0
+ error_check_good client [$env1 rep_start -client] 0
+ set newmaster $env0
+ set envlist "{$env0 1} {$env1 2} {$env2 3}"
+ catch {$origenv0 close} res
+ catch {$origenv1 close} res
+ catch {$origdb close} res
+ catch {$origdb1 close} res
+ } elseif { $prep == "envrecover" || $prep == "dbrecover" } {
+ #
+ # To simulate a crash, simply stop using the old handles
+ # and reopen new ones, with recovery. First flush both
+ # the log and mpool to disk.
+ #
+ set origenv0 $env0
+ set origenv1 $env1
+ set origtxn1 $txn1
+ set origtxn2 $txn2
+ puts "\tRep$tnum.c.0: Sync and recover master environment."
+ error_check_good flush1 [$env0 log_flush] 0
+ error_check_good sync1 [$env0 mpool_sync] 0
+ set oldgen [stat_field $env0 rep_stat "Generation number"]
+ error_check_good flush1 [$env1 log_flush] 0
+ error_check_good sync1 [$env1 mpool_sync] 0
+ if { $after == 1 } {
+ $db1 close
+ $db close
+ }
+ if { $prep == "dbrecover" } {
+ set recargs "-h $masterdir -c "
+ set stat [catch {eval exec $util_path/db_recover \
+ -e $recargs} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+ set recargs "-h $clientdir -c "
+ set stat [catch {eval exec $util_path/db_recover \
+ -e $recargs} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+ }
+ #
+ # !!!
+ # We still need to open with recovery, even if 'dbrecover'
+ # because db_recover cannot open the env with replication
+ # enabled. But db_recover will be the real recovery that
+ # needs to deal with the prepared txn. This recovery below
+ # for db_recover, should be a no-op essentially.
+ #
+ set recenv0 [eval $ma_envcmd -recover]
+ error_check_good master_env [is_valid_env $recenv0] TRUE
+ puts "\tRep$tnum.c.1: Run txn_recover on master env."
+ set env0 [eval $ma_envcmd]
+ error_check_good master_env [is_valid_env $env0] TRUE
+ set txnlist [$env0 txn_recover]
+ error_check_good txnlist_len [llength $txnlist] $pbnyc
+ puts "\tRep$tnum.c.2: Resolve txn ($op) on master env."
+ rep075_resolve $txnlist $oplist
+ error_check_good recenv0_close [$recenv0 close] 0
+
+ puts "\tRep$tnum.c.3: Recover client environment."
+ set recenv1 [eval $cl_envcmd -recover -errpfx "ENV1REC"]
+ error_check_good client_env [is_valid_env $recenv1] TRUE
+ puts "\tRep$tnum.c.4: Run txn_recover on client env."
+ set env1 [eval $cl_envcmd -errpfx "ENV1NEW"]
+ error_check_good client_env [is_valid_env $env1] TRUE
+ set txnlist [$env1 txn_recover]
+ error_check_good txnlist_len [llength $txnlist] $pbnyc
+
+ puts "\tRep$tnum.c.5: Resolve txns ($oplist) on client env."
+ rep075_resolve $txnlist $oplist
+ error_check_good recenv1_close [$recenv1 close] 0
+
+ puts "\tRep$tnum.c.6: Restart replication on both envs."
+ if { $prep == "dbrecover" } {
+ #
+ # XXX Since we ran db_recover, we lost the rep gen
+ # and clientenv2 cannot detect the change. Until
+ # SR 15396 is fixed, we'll fake it by becoming
+ # master, downgrading and then upgrading again to
+ # advance the generation number.
+ #
+ error_check_good master [$env0 rep_start -master] 0
+ error_check_good master [$env0 rep_start -client] 0
+ replclear 2
+ replclear 3
+ }
+ error_check_good master [$env0 rep_start -master] 0
+ set gen [stat_field $env0 rep_stat "Generation number"]
+ #
+ # If in-memory rep, restarting environment puts gen back
+ # to 1, the same as oldgen. envrecover doesn't do the extra
+ # rep_start, so gen is expected to stay at 1 in this case.
+ #
+ if { $repfiles_in_memory != 0 && $prep == "envrecover" } {
+ error_check_good gen $gen $oldgen
+ } else {
+ error_check_bad gen $gen $oldgen
+ }
+ error_check_good client [$env1 rep_start -client] 0
+ set newmaster $env0
+ set envlist "{$env0 1} {$env1 2} {$env2 3}"
+ process_msgs $envlist
+ #
+ # Clean up old Tcl handles.
+ #
+ catch {$origenv0 close} res
+ catch {$origenv1 close} res
+ catch {$origtxn1 close} res
+ catch {$origtxn2 close} res
+ }
+ #
+ # Run a standard rep_test creating test.db now.
+ #
+ eval rep_test $method $newmaster NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ #
+ # Verify whether or not the key exists in the databases both
+ # on the client and the master.
+ #
+ puts "\tRep$tnum.d: Verify prepared data."
+ foreach e $envlist {
+ set env [lindex $e 0]
+ if { $databases_in_memory } {
+ set db1 [eval {berkdb_open_noerr -env $env\
+ -auto_commit -create -mode 0644} $largs\
+ $omethod $testfile]
+ } else {
+ set db1 [eval {berkdb_open_noerr -env $env\
+ -auto_commit -create -mode 0644} $largs\
+ $omethod $testfile $sub]
+ }
+ error_check_good dbopen [is_valid_db $db1] TRUE
+ set db2 [eval {berkdb_open_noerr -env $env -auto_commit \
+ -create -mode 0644} $largs $omethod $testfile2]
+ error_check_good dbopen [is_valid_db $db2] TRUE
+ set k1 [$db1 get $key]
+ set k2 [$db2 get $key]
+ if { $op1 == "commit" } {
+ error_check_good key [llength $k1] 1
+ } else {
+ error_check_good key [llength $k1] 0
+ }
+ if { $op2 == "commit" } {
+ error_check_good key [llength $k2] 1
+ } else {
+ error_check_good key [llength $k2] 0
+ }
+
+ error_check_good db_close [$db1 close] 0
+ error_check_good db_close [$db2 close] 0
+ }
+ error_check_good env0_close [$env0 close] 0
+ error_check_good env1_close [$env1 close] 0
+ error_check_good env2_close [$env2 close] 0
+
+ replclose $testdir/MSGQUEUEDIR
+ return
+}
+
+proc rep075_resolve { txnlist ops } {
+ error_check_good resolve_lists [llength $txnlist] [llength $ops]
+ foreach trec $txnlist op $ops {
+ set txn [lindex $trec 0]
+ error_check_good commit [$txn $op] 0
+ }
+}
diff --git a/db-4.8.30/test/rep076.tcl b/db-4.8.30/test/rep076.tcl
new file mode 100644
index 0000000..585a872
--- /dev/null
+++ b/db-4.8.30/test/rep076.tcl
@@ -0,0 +1,203 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep076
+# TEST Replication elections - what happens if elected client
+# TEST does not become master?
+# TEST
+# TEST Set up a master and 3 clients. Take down master, run election.
+# TEST The elected client will ignore the fact that it's been elected,
+# TEST so we still have 3 clients and no master.
+# TEST
+# TEST Run another election, a regular election that allows the winner
+# TEST to become master, and make sure it goes okay. We do this both
+# TEST for the client that ignored its election and for the other client.
+# TEST
+# TEST This simulates what would happen if, say, we had a temporary
+# TEST network partition and lost the winner.
+#
+proc rep076 { method args } {
+ source ./include.tcl
+
+ global mixed_mode_logging
+ global databases_in_memory
+ global repfiles_in_memory
+
+ set tnum "076"
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for btree only.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: Skipping for method $method."
+ return
+ }
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ set nclients 3
+ set logsets [create_logsets [expr $nclients + 1]]
+ set winsets { { 1 1 } { 1 2 } }
+ foreach l $logsets {
+ foreach w $winsets {
+ puts "Rep$tnum ($method): Replication elections -\
+ elected client ignores election $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "Rep$tnum: Client $i logs are\
+ [lindex $l [expr $i + 1]]"
+ }
+ rep076_sub $method $nclients $tnum $l $w $args
+ }
+ }
+}
+
+proc rep076_sub { method nclients tnum logset winset largs } {
+ source ./include.tcl
+ global machids
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ set m_logtype [lindex $logset 0]
+ set m_logargs [adjust_logargs $m_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ set c_logtype($i) [lindex $logset [expr $i + 1]]
+ set c_logargs($i) [adjust_logargs $c_logtype($i)]
+ set c_txnargs($i) [adjust_txnargs $c_logtype($i)]
+ }
+
+ # Open a master.
+ set envlist {}
+ repladd 1
+ set env_cmd(M) "berkdb_env -create -log_max 1000000 $verbargs \
+ -event rep_event $repmemargs \
+ -home $masterdir $m_txnargs $m_logargs -rep_master \
+ -errpfx MASTER -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+ lappend envlist "$masterenv 1"
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env_noerr -create $verbargs \
+ -event rep_event $repmemargs \
+ -home $clientdir($i) $c_txnargs($i) $c_logargs($i) \
+ -rep_client -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i)]
+ error_check_good \
+ client_env($i) [is_valid_env $clientenv($i)] TRUE
+ lappend envlist "$clientenv($i) $envid"
+ }
+
+ # Bring the clients online by processing the startup messages.
+ process_msgs $envlist
+
+ # Run a modified test001 in the master.
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set niter 10
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ # Close master.
+ error_check_good masterenv_close [$masterenv close] 0
+ set envlist [lreplace $envlist 0 0]
+
+ # Winner1 is set up to win the first election, winner2
+ # the second.
+ set m "Rep$tnum.b"
+ set winner1 [lindex $winset 0]
+ set winner2 [lindex $winset 1]
+ set elector 1
+ set nsites $nclients
+ set nvotes $nclients
+ setpriority pri $nclients $winner1
+ if { $databases_in_memory } {
+ set dbname { "" "test.db" }
+ } else {
+ set dbname "test.db"
+ }
+
+ foreach pair $envlist {
+ set i [expr [lindex $pair 1] - 2]
+ replclear [expr $i + 2]
+ set err_cmd($i) "none"
+ set crash($i) 0
+ if { $rep_verbose == 1 } {
+ $clientenv($i) errpfx CLIENT$i
+ $clientenv($i) verbose $verbose_type on
+ $clientenv($i) errfile /dev/stderr
+ set env_cmd($i) [concat $env_cmd($i) \
+ "-errpfx CLIENT$i -errfile /dev/stderr"]
+ }
+ }
+
+ # Run election where winner will ignore its election and
+ # not be made master.
+ puts "\tRep$tnum: First winner ignores its election."
+ run_election env_cmd envlist err_cmd pri crash $qdir $m\
+ $elector $nsites $nvotes $nclients $winner1 0 $dbname 1
+
+ # Run second election where winner accepts its election and
+ # is made master.
+ puts "\tRep$tnum: Second winner accepts its election."
+ setpriority pri $nclients $winner2
+ run_election env_cmd envlist err_cmd pri crash $qdir $m\
+ $elector $nsites $nvotes $nclients $winner2 0 $dbname
+
+ # Clean up.
+ foreach pair $envlist {
+ set cenv [lindex $pair 0]
+ error_check_good cenv_close [$cenv close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
diff --git a/db-4.8.30/test/rep077.tcl b/db-4.8.30/test/rep077.tcl
new file mode 100644
index 0000000..b49ddd7
--- /dev/null
+++ b/db-4.8.30/test/rep077.tcl
@@ -0,0 +1,158 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep077
+# TEST
+# TEST Replication, recovery and applying log records immediately.
+# TEST Master and 1 client. Start up both sites.
+# TEST Close client and run rep_test on the master so that the
+# TEST log record is the same LSN the client would be expecting.
+# TEST Reopen client with recovery and verify the client does not
+# TEST try to apply that "expected" record before it synchronizes
+# TEST with the master.
+#
+proc rep077 { method { tnum "077"} args} {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ foreach l $logsets {
+ puts "Rep$tnum ($method): Recovered client\
+ getting immediate log records $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep077_sub $method $tnum $l $args
+ }
+}
+
+proc rep077_sub { method tnum logset largs} {
+ global testdir
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ set niter 5
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env_noerr -create \
+ $verbargs $repmemargs \
+ -home $masterdir -errpfx MASTER -txn nosync -rep_master \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+
+ # Open a client
+ repladd 2
+ set env_cmd(C) "berkdb_env_noerr -create \
+ $verbargs $repmemargs \
+ -home $clientdir -errpfx CLIENT -txn nosync -rep_client \
+ -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $env_cmd(C)]
+
+ puts "\tRep$tnum.a: Start up master and client."
+ # Bring the client online by processing the startup messages.
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Close client."
+ $clientenv close
+
+ #
+ # We want to run rep_test now and DO NOT replclear the
+ # messages for the closed client. We want to make sure
+ # that the first message the client sees upon restarting
+ # is a log record that exactly matches the current
+ # expected LSN.
+ #
+ puts "\tRep$tnum.c: Run rep_test on master with client closed."
+ #
+ # Move it forward by sending in niter as start and skip.
+ #
+ eval rep_test $method $masterenv NULL $niter $niter $niter 0 $largs
+
+ # We need to reopen with recovery to blow away our idea of
+ # who the master is, because this client will start up with
+ # the right generation number and the ready_lsn will be
+ # set to the right value for the first log record to apply.
+ #
+ # However, this client is running recovery and will have
+ # written its own recovery log records. So, until this
+ # client finds and synchronizes with the master after
+ # restarting, its ready_lsn and lp->lsn will not be
+	# in sync, and this client must not try to apply the records.
+ #
+ puts "\tRep$tnum.d: Restart client with recovery and process messages."
+ set clientenv [eval $env_cmd(C) -recover]
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ process_msgs $envlist
+
+ #
+ # If we didn't crash at this point, we're okay.
+ #
+ $masterenv close
+ $clientenv close
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep078.tcl b/db-4.8.30/test/rep078.tcl
new file mode 100644
index 0000000..c1f5269
--- /dev/null
+++ b/db-4.8.30/test/rep078.tcl
@@ -0,0 +1,345 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep078
+# TEST
+# TEST Replication and basic lease test.
+# TEST Set leases on master and 2 clients.
+# TEST Do a lease operation and process the messages to all clients.
+# TEST Read with leases on the master.  Do another lease operation
+# TEST and don't process it on any client.  Try to read with
+# TEST leases on the master and verify it fails.  Process the messages
+# TEST to the clients and retry the read.
+#
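+# For reference, a sketch of the read pattern used below.  check_leaseget is
+# the test-suite helper; the third argument holds extra get flags and the
+# last argument is the expected outcome (0 for success):
+#	check_leaseget $masterdb $key "" 0			lease still valid
+#	check_leaseget $masterdb $key "-nolease" 0		bypass lease check
+#	check_leaseget $masterdb $key "" REP_LEASE_EXPIRED	lease has expired
+#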
+proc rep078 { method { tnum "078" } args } {
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Valid for all access methods. Other lease tests limit the
+ # test because there is nothing method-specific being tested.
+ # Use all methods for this basic test.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 3]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+	# Run the body of the test with and without recovery.
+	# Skip recovery with in-memory logging - it doesn't make sense.
+ #
+ # Also skip the case where the master is in-memory and at least
+ # one of the clients is on-disk. If the master is in-memory,
+ # the wrong site gets elected because on-disk envs write a log
+ # record when they create the env and in-memory ones do not
+ # and the test wants to control which env gets elected.
+ #
+ foreach r $test_recopts {
+ foreach l $logsets {
+ set logindex [lsearch -exact $l "in-memory"]
+ if { $r == "-recover" && $logindex != -1 } {
+ puts "Skipping rep$tnum for -recover\
+ with in-memory logs."
+ continue
+ }
+ set master_logs [lindex $l 0]
+ if { $master_logs == "in-memory" } {
+ set client_logs [lsearch -exact $l "on-disk"]
+ if { $client_logs != -1 } {
+ puts "Skipping for in-memory master\
+ and on-disk client."
+ continue
+ }
+ }
+
+ puts "Rep$tnum ($method $r): Replication\
+ and basic master leases $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client 1 logs are [lindex $l 1]"
+ puts "Rep$tnum: Client 2 logs are [lindex $l 2]"
+ rep078_sub $method $tnum $l $r $args
+ }
+ }
+}
+
+proc rep078_sub { method tnum logset recargs largs } {
+ source ./include.tcl
+ global testdir
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set c2_logtype [lindex $logset 2]
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+
+ # Set leases for 3 sites, 3 second timeout, 0% clock skew
+ set nsites 3
+ set lease_to 3000000
+ set lease_tosec [expr $lease_to / 1000000]
+ set clock_fast 0
+ set clock_slow 0
+ set testfile test.db
+ #
+ # Since we have to use elections, the election code
+ # assumes a 2-off site id scheme.
+ # Open a master.
+ repladd 2
+ set err_cmd(0) "none"
+ set crash(0) 0
+ set pri(0) 100
+ #
+ # Note that using the default clock skew should be the same
+ # as specifying "no skew" through the API. We want to
+ # test both API usages here.
+ #
+ set envcmd(0) "berkdb_env -create $m_txnargs $m_logargs \
+ $verbargs -errpfx MASTER -home $masterdir \
+ -rep_lease \[list $nsites $lease_to\] \
+ -event rep_event $repmemargs \
+ -rep_client -rep_transport \[list 2 replsend\]"
+ set masterenv [eval $envcmd(0) $recargs]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open two clients.
+ repladd 3
+ set err_cmd(1) "none"
+ set crash(1) 0
+ set pri(1) 10
+ set envcmd(1) "berkdb_env -create $c_txnargs $c_logargs \
+ $verbargs -errpfx CLIENT -home $clientdir \
+ -rep_lease \[list $nsites $lease_to $clock_fast $clock_slow\] \
+ -event rep_event $repmemargs \
+ -rep_client -rep_transport \[list 3 replsend\]"
+ set clientenv [eval $envcmd(1) $recargs]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ repladd 4
+ set err_cmd(2) "none"
+ set crash(2) 0
+ set pri(2) 10
+ set envcmd(2) "berkdb_env -create $c2_txnargs $c2_logargs \
+ $verbargs -errpfx CLIENT2 -home $clientdir2 \
+ -rep_lease \[list $nsites $lease_to\] \
+ -event rep_event $repmemargs \
+ -rep_client -rep_transport \[list 4 replsend\]"
+ set clientenv2 [eval $envcmd(2) $recargs]
+ error_check_good client_env [is_valid_env $clientenv2] TRUE
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 2} {$clientenv 3} {$clientenv2 4}"
+ process_msgs $envlist
+
+ #
+ # Run election to get a master. Leases prevent us from
+ # simply assigning a master.
+ #
+ set msg "Rep$tnum.a"
+ puts "\tRep$tnum.a: Run initial election."
+ set nvotes $nsites
+ set winner 0
+ setpriority pri $nsites $winner
+ set elector [berkdb random_int 0 2]
+ #
+ # Note we send in a 0 for nsites because we set nsites back
+ # when we started running with leases. Master leases require
+ # that nsites be set before calling rep_start, and master leases
+ # require that the nsites arg to rep_elect be 0.
+ #
+ run_election envcmd envlist err_cmd pri crash $qdir $msg \
+ $elector 0 $nvotes $nsites $winner 0 NULL
+
+ puts "\tRep$tnum.b: Spawn a child tclsh to do txn work."
+ set pid [exec $tclsh_path $test_path/wrap.tcl \
+ rep078script.tcl $testdir/rep078script.log \
+ $masterdir $testfile $method &]
+
+ # Let child run, create database and put a txn into it.
+ # Process messages while we wait for the child to complete
+ # its txn so that the clients can grant leases.
+ puts "\tRep$tnum.c: Wait for child to write txn."
+ while { 1 } {
+ if { [file exists $testdir/marker.db] == 0 } {
+ tclsleep 1
+ } else {
+ set markerenv [berkdb_env -home $testdir -txn]
+ error_check_good markerenv_open \
+ [is_valid_env $markerenv] TRUE
+ set marker [berkdb_open -unknown -env $markerenv \
+ -auto_commit marker.db]
+ set kd [$marker get CHILD1]
+ while { [llength $kd] == 0 } {
+ process_msgs $envlist
+ tclsleep 1
+ set kd [$marker get CHILD1]
+ }
+ process_msgs $envlist
+ #
+ # Child sends us the key it used as the data
+ # of the CHILD1 key.
+ #
+ set key [lindex [lindex $kd 0] 1]
+ break
+ }
+ }
+ set masterdb [eval \
+ {berkdb_open_noerr -env $masterenv -rdonly $testfile}]
+ error_check_good dbopen [is_valid_db $masterdb] TRUE
+
+ process_msgs $envlist
+ set omethod [convert_method $method]
+ set clientdb [eval {berkdb_open_noerr \
+ -env $clientenv $omethod -rdonly $testfile}]
+ error_check_good dbopen [is_valid_db $clientdb] TRUE
+
+ set uselease ""
+ set ignorelease "-nolease"
+ puts "\tRep$tnum.d.0: Read with leases."
+ check_leaseget $masterdb $key $uselease 0
+ check_leaseget $clientdb $key $uselease 0
+ puts "\tRep$tnum.d.1: Read ignoring leases."
+ check_leaseget $masterdb $key $ignorelease 0
+ check_leaseget $clientdb $key $ignorelease 0
+ #
+	# This should fail because the lease is expired and any attempt
+	# by the master to refresh it will not be processed.
+ #
+ set sleep [expr $lease_tosec + 1]
+ puts "\tRep$tnum.e.0: Sleep $sleep secs to expire leases and read again."
+ tclsleep $sleep
+ #
+ # Verify the master gets REP_LEASE_EXPIRED. Verify that the
+ # read on the client ignores leases and succeeds.
+ #
+ check_leaseget $masterdb $key $uselease REP_LEASE_EXPIRED
+ check_leaseget $clientdb $key $uselease 0
+ puts "\tRep$tnum.e.1: Read ignoring leases."
+ check_leaseget $masterdb $key $ignorelease 0
+ check_leaseget $clientdb $key $ignorelease 0
+
+ error_check_good timestamp_done \
+ [$marker put PARENT1 [timestamp -r]] 0
+
+ set kd [$marker get CHILD2]
+ while { [llength $kd] == 0 } {
+ process_msgs $envlist
+ tclsleep 1
+ set kd [$marker get CHILD2]
+ }
+ process_msgs $envlist
+ #
+ # Child sends us the key it used as the data
+ # of the CHILD2 key.
+ #
+ set key [lindex [lindex $kd 0] 1]
+
+ puts "\tRep$tnum.f: Child writes txn + ckp. Don't process msgs."
+ #
+ # Child has committed the txn and we have processed it. Now
+ # signal the child process to put a checkpoint, which we
+ # will not process. That will invalidate leases.
+ error_check_good timestamp_done \
+ [$marker put PARENT2 [timestamp -r]] 0
+
+ set kd [$marker get CHILD3]
+ while { [llength $kd] == 0 } {
+ tclsleep 1
+ set kd [$marker get CHILD3]
+ }
+
+ puts "\tRep$tnum.f.0: Read using leases fails."
+ check_leaseget $masterdb $key $uselease REP_LEASE_EXPIRED
+ puts "\tRep$tnum.f.1: Read ignoring leases."
+ check_leaseget $masterdb $key $ignorelease 0
+ puts "\tRep$tnum.g: Process messages to clients."
+ process_msgs $envlist
+ puts "\tRep$tnum.h: Verify read with leases now succeeds."
+ check_leaseget $masterdb $key $uselease 0
+
+ watch_procs $pid 5
+
+ process_msgs $envlist
+ puts "\tRep$tnum.i: Downgrade master."
+ $masterenv rep_start -client
+ process_msgs $envlist
+
+ rep_verify $masterdir $masterenv $clientdir $clientenv
+ process_msgs $envlist
+ rep_verify $masterdir $masterenv $clientdir2 $clientenv2 0 1 0
+
+ # Clean up.
+ error_check_good marker_db_close [$marker close] 0
+ error_check_good marker_env_close [$markerenv close] 0
+ error_check_good masterdb_close [$masterdb close] 0
+	error_check_good clientdb_close [$clientdb close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ error_check_good clientenv_close [$clientenv2 close] 0
+	error_check_good clientenv2_close [$clientenv2 close] 0
+ replclose $testdir/MSGQUEUEDIR
+
+ # Check log file for failures.
+ set errstrings [eval findfail $testdir/rep078script.log]
+ foreach str $errstrings {
+ puts "FAIL: error message in rep078 log file: $str"
+ }
+}
+
diff --git a/db-4.8.30/test/rep078script.tcl b/db-4.8.30/test/rep078script.tcl
new file mode 100644
index 0000000..3816f66
--- /dev/null
+++ b/db-4.8.30/test/rep078script.tcl
@@ -0,0 +1,123 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Rep078 script - Master leases.
+#
+# Test master leases and write operations.
+#
+# Usage: rep078script masterdir dbfile method
+# masterdir: master env directory
+# dbfile: name of database file
+# method: access method
+#
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+source $test_path/reputils.tcl
+
+set usage "rep078script masterdir dbfile method"
+
+# Verify usage
+if { $argc != 3 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set masterdir [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set method [ lindex $argv 2 ]
+
+# Join the queue env. We assume the rep test convention of
+# placing the messages in $testdir/MSGQUEUEDIR.
+set queueenv [eval berkdb_env -home $testdir/MSGQUEUEDIR]
+error_check_good script_qenv_open [is_valid_env $queueenv] TRUE
+
+#
+# We need to set up our own machids.
+# Add 2 for master env id, and 3 and 4 for the clientenv ids.
+#
+repladd 2
+repladd 3
+repladd 4
+
+# Join the master env.
+set ma_cmd "berkdb_env_noerr -home $masterdir \
+ -txn -rep_transport \[list 2 replsend\]"
+# set ma_cmd "berkdb_env_noerr -home $masterdir \
+# -verbose {rep on} -errfile /dev/stderr \
+# -txn -rep_transport \[list 2 replsend\]"
+puts "Joining master env"
+set masterenv [eval $ma_cmd]
+error_check_good script_menv_open [is_valid_env $masterenv] TRUE
+
+# Create a marker file. Don't put anything in it yet. The parent
+# process will be processing messages while it looks for our
+# marker.
+
+puts "Create marker file"
+set markerenv [berkdb_env -create -home $testdir -txn]
+error_check_good markerenv_open [is_valid_env $markerenv] TRUE
+set marker \
+ [eval "berkdb_open -create -btree -auto_commit -env $markerenv marker.db"]
+
+#
+# Create the database and then do a lease operation. Don't
+# process messages in the child process.
+#
+puts "Open database"
+set args [convert_args $method]
+puts "args is $args"
+set omethod [convert_method $method]
+set db [eval "berkdb_open -env $masterenv -auto_commit -create \
+ $omethod $args $dbfile"]
+error_check_good script_db_open [is_valid_db $db] TRUE
+
+puts "Do lease op"
+set key 1
+do_leaseop $masterenv $db $method $key NULL 0
+
+puts "Put CHILD1"
+error_check_good child_key \
+ [$marker put CHILD1 $key] 0
+
+puts "Wait for PARENT1"
+# Give the parent a chance to process messages and check leases.
+while { [llength [$marker get PARENT1]] == 0 } {
+ tclsleep 1
+}
+
+puts "Do lease op 2"
+incr key
+do_leaseop $masterenv $db $method $key NULL 0
+puts "Put CHILD2"
+error_check_good child2_key \
+ [$marker put CHILD2 $key] 0
+
+puts "Wait for PARENT2"
+# Give the parent a chance to process messages and check leases.
+while { [llength [$marker get PARENT2]] == 0 } {
+ tclsleep 1
+}
+
+#
+# After we get PARENT2, do a checkpoint.
+# Then our work is done and we clean up.
+#
+puts "Write a checkpoint"
+$masterenv txn_checkpoint
+puts "Put CHILD3"
+error_check_good child3_key \
+ [$marker put CHILD3 $key] 0
+
+puts "Clean up and exit"
+# Clean up the child so the parent can go forward.
+error_check_good master_db_close [$db close] 0
+error_check_good marker_db_close [$marker close] 0
+error_check_good markerenv_close [$markerenv close] 0
+error_check_good script_master_close [$masterenv close] 0
+
diff --git a/db-4.8.30/test/rep079.tcl b/db-4.8.30/test/rep079.tcl
new file mode 100644
index 0000000..24a533c
--- /dev/null
+++ b/db-4.8.30/test/rep079.tcl
@@ -0,0 +1,329 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep079
+# TEST Replication leases and invalid usage.
+# TEST
+# TEST Open a client without leases. Attempt to set leases after rep_start.
+# TEST Attempt to declare as master without election.
+# TEST Run an election with a non-zero nsites parameter value.
+# TEST Elect a master with leases.  Put some data and send to clients.
+# TEST Cleanly shut down the master env.  Restart it without
+# TEST recovery and verify leases are expired and refreshed.
+# TEST Add a new client without leases to a group using leases.
+#
+proc rep079 { method { tnum "079" } args } {
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Valid for all access methods, but there is no difference
+ # running it with one method over any other. Just use btree.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: Skipping for method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set logsets [create_logsets 4]
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ foreach l $logsets {
+ #
+ # Skip the case where the master is in-memory and at least
+ # one of the clients is on-disk. If the master is in-memory,
+ # the wrong site gets elected because on-disk envs write a log
+ # record when they create the env and in-memory ones do not
+ # and the test wants to control which env gets elected.
+ #
+ set master_logs [lindex $l 0]
+ if { $master_logs == "in-memory" } {
+ set client_logs [lsearch -exact $l "on-disk"]
+ if { $client_logs != -1 } {
+ puts "Skipping for in-memory master\
+ with on-disk client."
+ continue
+ }
+ }
+ puts "Rep$tnum: Replication leases and invalid usage $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ puts "Rep$tnum: Client 2 logs are [lindex $l 2]"
+ puts "Rep$tnum: Client 3 logs are [lindex $l 3]"
+ rep079_sub $method $tnum $l $args
+ }
+}
+
+proc rep079_sub { method tnum logset largs } {
+ global testdir
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+ set clientdir3 $testdir/CLIENTDIR3
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+ file mkdir $clientdir3
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+ set c2_logtype [lindex $logset 2]
+ set c3_logtype [lindex $logset 3]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set c2_logargs [adjust_logargs $c2_logtype]
+ set c3_logargs [adjust_logargs $c3_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+ set c2_txnargs [adjust_txnargs $c2_logtype]
+ set c3_txnargs [adjust_txnargs $c3_logtype]
+
+ # Set leases for 4 sites, 1 second timeout, 1% clock skew
+ # [NOTE: We are not adding in client3 until later so don't
+ # set it in nvotes.]
+ set nsites 4
+ set nvotes 3
+ set lease_to 1000000
+ set lease_tosec [expr $lease_to / 1000000]
+ set clock_fast 101
+ set clock_slow 100
+
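+ #
+ # The list handed to -rep_lease below is {nsites timeout fast slow}:
+ # 4 sites, a 1000000-usec (1 second) lease timeout, and a clock skew
+ # of fast/slow = 101/100, i.e. the 1% skew noted above.
+ #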
+ repladd 2
+ #
+ # Use a command without setting errpfx, errfile or verbose
+ # so that error messages can be caught correctly.
+ #
+ set envcmd_err "berkdb_env_noerr -create $m_txnargs $m_logargs \
+ $repmemargs -home $masterdir -rep_transport \[list 2 replsend\]"
+
+ #
+ # This is the real env command, but we won't use it
+ # quite yet.
+ set envcmd(0) "berkdb_env -create $m_txnargs $m_logargs \
+ $repmemargs $verbargs -errpfx MASTER -home $masterdir \
+ -event rep_event \
+ -rep_transport \[list 2 replsend\]"
+
+ #
+ # Leases must be configured before rep_start is called.
+ # Open a repl env without leases. Try to configure leases
+ # after the open has already called rep_start. Open as a client.
+ #
+ puts "\tRep$tnum.a: Try to configure leases after rep_start."
+ set noleaseenv [eval $envcmd_err -rep_client]
+ set stat [catch {$noleaseenv rep_lease \
+ [list $nsites $lease_to $clock_fast $clock_slow]} lease]
+ error_check_bad stat $stat 0
+ error_check_good menverror [is_substr $lease "timeout must be set"] 1
+ error_check_good close [$noleaseenv close] 0
+ env_cleanup $masterdir
+
+ #
+ # If leases are being used, elections must be used. A site
+ # cannot simply upgrade itself to master. Test that we cannot
+ # open as a client and then upgrade ourself to a master just
+ # by calling rep_start.
+ #
+ set upgenv [eval $envcmd_err -rep_client \
+ -rep_lease \[list $nsites $lease_to $clock_fast $clock_slow\]]
+ puts "\tRep$tnum.b: Try to upgrade a client without election."
+ set stat [catch {$upgenv rep_start -master} ret]
+ error_check_bad upg_stat $stat 0
+ error_check_good upg_str [is_substr $ret "Cannot become master"] 1
+ error_check_good close [$upgenv close] 0
+ env_cleanup $masterdir
+
+ #
+ # Now test inconsistencies dealing with having a group that
+ # is using leases up and running. For instance, if leases are
+ # configured, the 'nsites' arg to rep_elect must be 0, etc.
+ #
+ # Open the master. Must open as a client and get elected.
+ #
+ set err_cmd(0) "none"
+ set crash(0) 0
+ set pri(0) 100
+ set masterenv [eval $envcmd(0) -rep_client \
+ -rep_lease \[list $nsites $lease_to $clock_fast $clock_slow\]]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open two clients.
+ repladd 3
+ set err_cmd(1) "none"
+ set crash(1) 0
+ set pri(1) 10
+ set envcmd(1) "berkdb_env -create $c_txnargs $c_logargs \
+ $repmemargs $verbargs -errpfx CLIENT -home $clientdir \
+ -event rep_event \
+ -rep_lease \[list $nsites $lease_to $clock_fast $clock_slow\] \
+ -rep_client -rep_transport \[list 3 replsend\]"
+ set clientenv [eval $envcmd(1)]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ repladd 4
+ set err_cmd(2) "none"
+ set crash(2) 0
+ set pri(2) 10
+ set envcmd(2) "berkdb_env_noerr -create $c2_txnargs $c2_logargs \
+ $repmemargs -home $clientdir2 -event rep_event \
+ -rep_lease \[list $nsites $lease_to $clock_fast $clock_slow\] \
+ -rep_client -rep_transport \[list 4 replsend\]"
+ set clientenv2 [eval $envcmd(2)]
+ error_check_good client_env [is_valid_env $clientenv2] TRUE
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$masterenv 2} {$clientenv 3} {$clientenv2 4}"
+ process_msgs $envlist
+
+ #
+ # Send a non-zero nsites value for an election. That is an error.
+ #
+ puts "\tRep$tnum.c: Try to run election with leases and nsites value."
+ #
+ # !!! We have not set -errpfx or -errfile in envcmd(2) above;
+ # otherwise the error output won't be set in 'ret' below and
+ # the test will fail. Set them after this piece of the test.
+ #
+ set timeout 5000000
+ set res [catch {$clientenv2 rep_elect $nsites $nvotes $pri(2) \
+ $timeout} ret]
+ error_check_bad catch $res 0
+ error_check_good ret [is_substr $ret "nsites must be zero"] 1
+
+ #
+ # Now we can set verbose args, errpfx, etc. Set it in the
+ # command (for elections) and also manually add it to the
+ # current env handle.
+ #
+ set envcmd(2) "$envcmd(2) $verbargs -errpfx CLIENT2"
+ if { $rep_verbose == 1 } {
+ $clientenv2 verbose $verbose_type on
+ $clientenv2 errpfx CLIENT2
+ }
+
+ #
+ # This next section will test that a replicated env that is master
+ # can cleanly close and then reopen without recovery and retain
+ # its master status.
+ #
+ set msg "Rep$tnum.d"
+ set nvotes [expr $nsites - 1]
+ set winner 0
+ setpriority pri $nsites $winner
+ set elector [berkdb random_int 0 2]
+ puts "\tRep$tnum.d: Run election for real to get master."
+ #
+ # Run election for real. Set nsites to 0 for this command.
+ #
+ repladd 5
+ set err_cmd(3) "none"
+ set crash(3) 0
+ set pri(3) 0
+ run_election envcmd envlist err_cmd pri crash $qdir $msg \
+ $elector 0 $nvotes $nsites $winner 0 NULL
+
+ puts "\tRep$tnum.e: Write a checkpoint."
+ #
+ # Writing a checkpoint forces a PERM record which will cause
+ # the clients to grant us their leases. Then, while holding
+ # the lease grants we can do the next part of the test to
+ # close and cleanly reopen while holding leases.
+ $masterenv txn_checkpoint -force
+
+ process_msgs $envlist
+
+ puts "\tRep$tnum.f.0: Close master env."
+ error_check_good mclose [$masterenv close] 0
+ set sleep [expr $lease_tosec + 1]
+ puts "\tRep$tnum.f.1: Sleep $sleep secs to expire lease grants."
+ tclsleep $sleep
+ #
+ # We should be able to reopen the master env without running
+ # recovery and still retain our mastership.
+ set masterenv [eval $envcmd(0) -rep_master \
+ -rep_lease \[list $nsites $lease_to $clock_fast $clock_slow\]]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+ set envlist "{$masterenv 2} {$clientenv 3} {$clientenv2 4}"
+
+ #
+ # Verify that if a non-lease site tries to join a group that
+ # is using leases, it gets an error. Configuring leases
+ # must be all-or-none across all group members.
+ #
+ puts "\tRep$tnum.g: Add client3 that does not configure leases."
+ replclear 5
+ set envcmd(3) "berkdb_env_noerr -create $c3_txnargs $c3_logargs \
+ -home $clientdir3 -event rep_event \
+ $repmemargs $verbargs -errpfx CLIENT3 \
+ -rep_client -rep_transport \[list 5 replsend\]"
+ set clientenv3 [eval $envcmd(3)]
+ error_check_good client_env [is_valid_env $clientenv3] TRUE
+
+ # Bring the clients online by processing the startup messages.
+ set origlist $envlist
+ set envlist "{$masterenv 2} {$clientenv 3} \
+ {$clientenv2 4} {$clientenv3 5}"
+ process_msgs $envlist 0 NONE err
+
+ puts "\tRep$tnum.g.1: Verify client fatal error."
+ error_check_good process_msgs_err [is_substr $err DB_RUNRECOVERY] 1
+ #
+ # Close to reclaim Tcl resources, but we want to catch/ignore
+ # the continuing DB_RUNRECOVERY error.
+ #
+ catch {$clientenv3 close} ret
+ set envlist $origlist
+
+ puts "\tRep$tnum.h: Check expired lease error on txn commit."
+ #
+ # Leases are already expired, so attempt to commit should fail.
+ # (And this will be the 'before we commit' check that returns
+ # an error, not the 'after' check that panics).
+ #
+ set txn [$masterenv txn]
+ set stat [catch {$txn commit} ret]
+ error_check_good stat $stat 1
+ error_check_good exp [is_substr $ret REP_LEASE_EXPIRED] 1
+
+ error_check_good mclose [$masterenv close] 0
+ error_check_good cclose [$clientenv close] 0
+ error_check_good c2close [$clientenv2 close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep080.tcl b/db-4.8.30/test/rep080.tcl
new file mode 100644
index 0000000..413fcfe
--- /dev/null
+++ b/db-4.8.30/test/rep080.tcl
@@ -0,0 +1,189 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep080
+# TEST NOAUTOINIT with empty client logs.
+# TEST
+# TEST Verify that a fresh client trying to join the group for
+# TEST the first time observes the setting of DELAY_SYNC and NOAUTOINIT
+# TEST properly.
+# TEST
+proc rep080 { method { niter 200 } { tnum "080" } args } {
+
+ source ./include.tcl
+ global mixed_mode_logging
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Rep$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ if { $mixed_mode_logging != 0 } {
+ puts "Rep$tnum: skipping for in-mem (or mixed) logging."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # The {"" "-recover"} loop that is typical in replication tests can be
+ # useful for tests which close existing environments at some point, and
+ # then later reopen them. (When we reopen, we do so either with
+ # recovery, or without it.) But this test never does that.
+ #
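+ # For reference, such tests typically wrap their worker proc in a
+ # loop like the following sketch (the $test_recopts list name and
+ # the extra recovery argument are assumptions, not part of this
+ # test):
+ #
+ #	foreach r $test_recopts {
+ #		puts "Rep$tnum ($method $r): ..."
+ #		repNNN_sub $method $niter $tnum $r $args
+ #	}
+ #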
+ puts "Rep$tnum ($method):\
+ Test of NOAUTOINIT with empty client logs $msg $msg2."
+ rep080_sub $method $niter $tnum $args
+}
+
+proc rep080_sub { method niter tnum largs } {
+ global testdir
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir1 $testdir/CLIENTDIR1
+ set clientdir2 $testdir/CLIENTDIR2
+ set clientdir3 $testdir/CLIENTDIR3
+ set clientdir4 $testdir/CLIENTDIR4
+
+ file mkdir $masterdir
+ file mkdir $clientdir1
+ file mkdir $clientdir2
+ file mkdir $clientdir3
+ file mkdir $clientdir4
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $verbargs -errpfx MASTER \
+ $repmemargs \
+ -home $masterdir -txn -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd -rep_master]
+ set envlist "{$masterenv 1}"
+
+ # Run rep_test in the master.
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ eval rep_test $method $masterenv NULL $niter 0 0 $largs
+ process_msgs $envlist
+
+ # Open a client
+ puts "\tRep$tnum.b: Add a normal client."
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $verbargs -errpfx CLIENT \
+ $repmemargs \
+ -home $clientdir1 -txn -rep_transport \[list 2 replsend\]"
+ set clientenv1 [eval $cl_envcmd -rep_client]
+ lappend envlist [list $clientenv1 2]
+ process_msgs $envlist
+
+ rep_verify $masterdir $masterenv $clientdir1 $clientenv1 0 1 1
+
+ # Open a client with NOAUTOINIT
+ #
+ puts "\tRep$tnum.c: Add a client with NOAUTOINIT (should fail)."
+ repladd 3
+ set cl_envcmd "berkdb_env_noerr -create $verbargs -errpfx CLIENT \
+ $repmemargs \
+ -home $clientdir2 -txn -rep_transport \[list 3 replsend\]"
+ set clientenv2 [eval $cl_envcmd -rep_client]
+ $clientenv2 rep_config {noautoinit on}
+
+ lappend envlist [list $clientenv2 3]
+ process_msgs $envlist 0 NONE error
+ error_check_good errchk [is_substr $error JOIN_FAILURE] 1
+
+ # Open a client with DELAY_SYNC
+ #
+ puts "\tRep$tnum.d: Add a client with DELAY_SYNC."
+ repladd 4
+ set cl_envcmd "berkdb_env_noerr -create $verbargs -errpfx CLIENT \
+ $repmemargs \
+ -home $clientdir3 -txn -rep_transport \[list 4 replsend\]"
+ set clientenv3 [eval $cl_envcmd -rep_client]
+ $clientenv3 rep_config {delayclient on}
+
+ lappend envlist [list $clientenv3 4]
+ process_msgs $envlist 0 NONE error
+ error_check_good errchk2 $error 0
+
+ error_check_bad expect_error [catch {rep_verify \
+ $masterdir $masterenv $clientdir3 $clientenv3 0 1 1}] 0
+
+ error_check_good rep_sync [$clientenv3 rep_sync] 0
+ process_msgs $envlist 0 NONE error
+ error_check_good errchk3 $error 0
+ rep_verify $masterdir $masterenv $clientdir3 $clientenv3
+
+ # Open a client with both DELAY_SYNC and NOAUTOINIT
+ #
+ puts "\tRep$tnum.f: Add a client with DELAY_SYNC and NOAUTOINIT."
+ repladd 5
+ set cl_envcmd "berkdb_env_noerr -create $verbargs -errpfx CLIENT \
+ $repmemargs \
+ -home $clientdir4 -txn -rep_transport \[list 5 replsend\]"
+ set clientenv4 [eval $cl_envcmd -rep_client]
+ $clientenv4 rep_config {delayclient on}
+ $clientenv4 rep_config {noautoinit on}
+
+ lappend envlist [list $clientenv4 5]
+ process_msgs $envlist 0 NONE error
+ error_check_good process_msgs $error 0
+
+ error_check_bad expect_error2 [catch {rep_verify\
+ $masterdir $masterenv $clientdir4 $clientenv4 0 1 1}] 0
+
+ error_check_bad rep_sync [catch {$clientenv4 rep_sync} result] 0
+ error_check_good errchk5 [is_substr $result JOIN_FAILURE] 1
+
+ $masterenv close
+ $clientenv1 close
+ $clientenv2 close
+ $clientenv3 close
+ $clientenv4 close
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep081.tcl b/db-4.8.30/test/rep081.tcl
new file mode 100644
index 0000000..a865e32
--- /dev/null
+++ b/db-4.8.30/test/rep081.tcl
@@ -0,0 +1,288 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c)-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rep081
+# TEST Test of internal initialization and missing database files.
+# TEST
+# TEST One master, one client, two databases.
+# TEST Generate several log files.
+# TEST Remove old master log files.
+# TEST Start up client.
+# TEST Remove or replace one master database file while client initialization
+# TEST is in progress; make sure the other master database can keep processing.
+#
+proc rep081 { method { niter 200 } { tnum "081" } args } {
+
+ source ./include.tcl
+ global databases_in_memory
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Valid for all access methods.
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+
+ # This test needs to set its own pagesize.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Rep$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ set logsets [create_logsets 2]
+
+ # Set up for on-disk or in-memory databases.
+ set msg "using on-disk databases"
+ if { $databases_in_memory } {
+ set msg "using named in-memory databases"
+ if { [is_queueext $method] } {
+ puts -nonewline "Skipping rep$tnum for method "
+ puts "$method with named in-memory databases."
+ return
+ }
+ }
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ # Run with options to remove or replace the master database file.
+ set testopts { removefile replacefile }
+ foreach t $testopts {
+ foreach l $logsets {
+ puts "Rep$tnum ($method $t $args): Test of\
+ internal init with missing db file $msg $msg2."
+ puts "Rep$tnum: Master logs are [lindex $l 0]"
+ puts "Rep$tnum: Client logs are [lindex $l 1]"
+ rep081_sub $method $niter $tnum $l $t $args
+ }
+ }
+}
+
+proc rep081_sub { method niter tnum logset testopt largs } {
+ global testdir
+ global util_path
+ global databases_in_memory
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Log size is small so we quickly create more than one.
+ # The documentation says that the log file must be at least
+ # four times the size of the in-memory log buffer.
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_max [expr $pagesize * 8]
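+ # (Sibling tests in this change, e.g. rep082 and rep086, size these
+ # explicitly with "set log_buf [expr $pagesize * 2]" and
+ # "set log_max [expr $log_buf * 4]"; here only -log_max is passed,
+ # so the default log buffer size is used.)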
+
+ set m_logtype [lindex $logset 0]
+ set c_logtype [lindex $logset 1]
+
+ # In-memory logs cannot be used with -txn nosync.
+ set m_logargs [adjust_logargs $m_logtype]
+ set c_logargs [adjust_logargs $c_logtype]
+ set m_txnargs [adjust_txnargs $m_logtype]
+ set c_txnargs [adjust_txnargs $c_logtype]
+
+ # Open a master.
+ repladd 1
+ set ma_envcmd "berkdb_env_noerr -create $m_txnargs $repmemargs \
+ $m_logargs -log_max $log_max -errpfx MASTER $verbargs \
+ -home $masterdir -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $ma_envcmd -rep_master]
+ $masterenv rep_limit 0 0
+
+ # Run rep_test in the master only.
+ puts "\tRep$tnum.a: Running rep_test in replicated env."
+ set start 0
+ if { $databases_in_memory } {
+ set testfile { "" "test.db" }
+ set testfile2 { "" "test2.db" }
+ } else {
+ set testfile "test.db"
+ set testfile2 "test2.db"
+ }
+ set omethod [convert_method $method]
+ set dbargs [convert_args $method $largs]
+ set mdb [eval {berkdb_open_noerr} -env $masterenv -auto_commit\
+ -create -mode 0644 $omethod $dbargs $testfile ]
+ error_check_good reptest_db [is_valid_db $mdb] TRUE
+ set mdb2 [eval {berkdb_open_noerr} -env $masterenv -auto_commit\
+ -create -mode 0644 $omethod $dbargs $testfile2 ]
+ error_check_good reptest_db2 [is_valid_db $mdb2] TRUE
+
+ set stop 0
+ while { $stop == 0 } {
+ # Run rep_test in the master beyond the first log file.
+ eval rep_test $method \
+ $masterenv $mdb $niter $start $start 0 $largs
+ eval rep_test $method \
+ $masterenv $mdb2 $niter $start $start 0 $largs
+ incr start $niter
+
+ puts "\tRep$tnum.a.1: Run db_archive on master."
+ if { $m_logtype == "on-disk" } {
+ set res \
+ [eval exec $util_path/db_archive -d -h $masterdir]
+ }
+ #
+ # Make sure we have moved beyond the first log file.
+ #
+ set first_master_log [get_logfile $masterenv first]
+ if { $first_master_log > 1 } {
+ set stop 1
+ }
+
+ }
+
+ puts "\tRep$tnum.b: Open client."
+ repladd 2
+ set cl_envcmd "berkdb_env_noerr -create $c_txnargs $repmemargs \
+ $c_logargs -log_max $log_max -errpfx CLIENT $verbargs \
+ -home $clientdir -rep_transport \[list 2 replsend\]"
+ set clientenv [eval $cl_envcmd -rep_client]
+ $clientenv rep_limit 0 0
+ set envlist "{$masterenv 1} {$clientenv 2}"
+
+ # Check initial value for number of FILE_FAIL internal init cleanups.
+ error_check_good ff_cleanup \
+ [stat_field $clientenv rep_stat "File fail cleanups done"] 0
+
+ #
+ # Process messages in a controlled manner until the update (internal
+ # init) starts and we can remove or replace the database file.
+ #
+ set loop 10
+ set i 0
+ set entries 100
+ set in_rec_page 0
+ set dbrem_init 0
+ if { $testopt == "replacefile" } {
+ set errstr "invalid argument"
+ } else {
+ set errstr "no such file or directory"
+ }
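+ # ("removefile" leaves nothing at the path, so the master's attempt
+ # to open the file fails with "no such file or directory";
+ # "replacefile" leaves a file that is not a valid database, so the
+ # open fails with "invalid argument" instead.)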
+ while { $i < $loop } {
+ set nproced 0
+ incr nproced [proc_msgs_once $envlist NONE err]
+ #
+ # Last time through the loop the mdb database file
+ # is gone. The master is processing the client's PAGE_REQ
+ # and not finding the database file it needs so it sends a
+ # FILE_FAIL and returns an error. Break out of the loop if
+ # the expected error is seen.
+ #
+ if { [is_substr $err $errstr] } {
+ error_check_good nproced $nproced 0
+ break
+ } else {
+ error_check_bad nproced $nproced 0
+ error_check_good errchk $err 0
+ }
+ # The internal init file is very transient, but it does exist
+ # during the second iteration of this loop when rep files are
+ # on disk. Take this chance to make sure the internal init
+ # file doesn't exist when rep files are in-memory.
+ if { $i == 1 && $repfiles_in_memory == 1 } {
+ error_check_good noinit \
+ [file exists "$clientdir/__db.rep.init"] 0
+ }
+ #
+ # When we are in internal init, remove the mdb database file.
+ # This causes the master to send a FILE_FAIL that will cause
+ # the client to clean up its internal init.
+ #
+ if { $in_rec_page == 0 } {
+ set clstat [exec $util_path/db_stat \
+ -N -r -R A -h $clientdir]
+ if { $dbrem_init == 0 && \
+ [is_substr $clstat "REP_F_RECOVER_PAGE"] } {
+ set in_rec_page 1
+ set dbrem_init 1
+ #
+ # Turn off timer so that client sync doesn't
+ # prevent db operations.
+ #
+ $masterenv test force noarchive_timeout
+
+ # Close and remove mdb.
+ puts "\tRep$tnum.c: Remove a database file."
+ error_check_good mdb_close [$mdb close] 0
+ error_check_good remove_x [$masterenv \
+ dbremove -auto_commit $testfile] 0
+
+ # Make sure mdb file is really gone.
+ set dfname [file join $masterdir $testfile]
+ error_check_good gone [file exists $dfname] 0
+
+ # Replace mdb file with non-db content.
+ if { $testopt == "replacefile" } {
+ puts \
+ "\tRep$tnum.c.1: Replace database file."
+ set repfileid [open $dfname w+]
+ puts -nonewline $repfileid \
+ "This is not a database file."
+ close $repfileid
+ }
+ }
+ }
+ incr i
+ }
+
+ #
+ # Process two more batches of messages so client can process
+ # the FILE_FAIL message and the resulting new internal init.
+ #
+ puts "\tRep$tnum.d: Process messages including FILE_FAIL."
+ process_msgs $envlist 0 NONE err
+ if { $err != 0 } {
+ error_check_good errchk [is_substr $err $errstr] 1
+ }
+ puts "\tRep$tnum.d.1: Process messages including new internal init."
+ process_msgs $envlist 0 NONE err
+ error_check_good errchk $err 0
+
+ puts "\tRep$tnum.e: Verify logs and databases."
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1 test2.db
+
+ # Make sure we have seen a FILE_FAIL internal init cleanup.
+ error_check_good ff_cleanup \
+ [stat_field $clientenv rep_stat "File fail cleanups done"] 1
+
+ error_check_good mdb_close2 [$mdb2 close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
+
diff --git a/db-4.8.30/test/rep082.tcl b/db-4.8.30/test/rep082.tcl
new file mode 100644
index 0000000..71fc58d
--- /dev/null
+++ b/db-4.8.30/test/rep082.tcl
@@ -0,0 +1,209 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c)-2009 Oracle. All rights reserved.
+#
+# TEST rep082
+# TEST Sending replication requests to correct master site.
+# TEST
+# TEST Regression test for a bug [#16592] where a client could send an
+# TEST UPDATE_REQ to another client instead of the master.
+#
+proc rep082 { method { niter 200 } { tnum "082" } args } {
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ puts "Rep$tnum: ($method) Test that\
+ client doesn't send UPDATE_REQ to another client $msg2."
+
+ rep082_sub $method $niter $tnum $args
+}
+
+proc rep082_sub { method niter tnum largs } {
+ global testdir
+ global util_path
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ replsetup $testdir/MSGQUEUEDIR
+
+ file mkdir [set dirA $testdir/A]
+ file mkdir [set dirB $testdir/B]
+ file mkdir [set dirC $testdir/C]
+
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_buf [expr $pagesize * 2]
+ set log_max [expr $log_buf * 4]
+
+ repladd 1
+ set env_A_cmd "berkdb_env_noerr -create -txn $verbargs $repmemargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx SITE_A \
+ -home $dirA -rep_transport \[list 1 replsend\]"
+ set envs(A) [eval $env_A_cmd -rep_master]
+
+ # Open a client
+ repladd 2
+ set env_B_cmd "berkdb_env_noerr -create -txn $verbargs $repmemargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx SITE_B \
+ -home $dirB -rep_transport \[list 2 replsend\]"
+ set envs(B) [eval $env_B_cmd -rep_client]
+
+ # Open 2nd client
+ repladd 3
+ set env_C_cmd "berkdb_env_noerr -create -txn $verbargs $repmemargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx SITE_C \
+ -home $dirC -rep_transport \[list 3 replsend\]"
+ set envs(C) [eval $env_C_cmd -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$envs(A) 1} {$envs(B) 2} {$envs(C) 3}"
+ process_msgs $envlist
+
+ # Run rep_test in the master (and update clients).
+ puts "\tRep$tnum.a: populate initial portion of log."
+ eval rep_test $method $envs(A) NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ $envs(A) close
+ $envs(B) close
+ $envs(C) close
+
+ # At this point, we have a first section of the log history that was
+ # produced at master site A, and is replicated to both other sites. Now
+ # let's produce a second section of history, also produced at master
+ # site A, but only replicated to site C; make sure this second section
+ # spans a log file boundary. Archive log files at site C, so that we
+ # make sure that site C has only a fraction of this second section.
+ #
+ set res [eval exec $util_path/db_archive -l -h $dirC]
+ set last_client_log [lindex [lsort $res] end]
+
+ set envs(A) [eval $env_A_cmd -recover -rep_master]
+ set envs(C) [eval $env_C_cmd -recover -rep_client]
+ replclear 2
+ process_msgs "{$envs(A) 1} {$envs(C) 3}"
+
+ set stop 0
+ set start 0
+ set count 0
+ while { $stop == 0 } {
+ incr count
+ # Run rep_test in the master (don't update client).
+ puts "\tRep$tnum.b: Fill log until next log file."
+ incr start $niter
+ eval rep_test $method $envs(A) NULL $niter $start $start 0 $largs
+
+ replclear 2
+ process_msgs "{$envs(A) 1} {$envs(C) 3}"
+
+ puts "\tRep$tnum.c: Run db_archive on client C."
+ exec $util_path/db_archive -d -h $dirC
+ set res [eval exec $util_path/db_archive -l -h $dirC]
+ if { [lsearch -exact $res $last_client_log] == -1} {
+ set stop 1
+ }
+ }
+
+ # Now make site B become the master. Since site B was not running
+ # during the last phase, it does not have any of the "second section of
+ # log history" that we produced in that phase. So site A will have to
+ # throw away those transactions in order to sync up with B. HOWEVER,
+ # site B will now generate yet another new section of log history, which
+ # is identical to the set of transactions generated a moment ago at site
+ # A. In other words, although this is the third section of history to
+ # be generated, we have arranged to have it completely replace the
+ # second section, and to have it exactly match! Note that we leave site
+ # C out of the picture during this phase.
+ #
+ $envs(A) close
+ $envs(C) close
+ set envs(B) [eval $env_B_cmd -recover -rep_master]
+ set envs(A) [eval $env_A_cmd -recover -rep_client]
+
+ set start 0
+ while {$count > 0} {
+ puts "\tRep$tnum.d: Running rep_test in replicated env."
+ incr start $niter
+ eval rep_test $method $envs(B) NULL $niter $start $start 0 $largs
+
+ replclear 3
+ process_msgs "{$envs(A) 1} {$envs(B) 2}"
+
+ incr count -1
+ }
+
+ # Now start up site C again, but configure it to rely on site A for
+ # client-to-client synchronization. Recall the known contents of site
+ # C's transaction log: it has a partial copy of the "second section" of
+ # log history (it has the end of that section, but not the beginning).
+ # The transactions in this log will have the same LSNs as are currently
+ # in place at sites A and B (which, remember, were produced by the
+ # identical "third section" of history), but the commit record contents
+ # won't exactly match, because the third section was produced by master
+ # site B.
+ #
+ # During the verify dance, client C will continue to walk back the log,
+ # finding commit records whose LSNs match at A/B, but whose
+ # contents do not match. When it hits the archived log file boundary it
+ # will have to give up without having found a match. Thus we have
+ # produced a situation where an incoming VERIFY message from another
+ # client (site A) results in client C sending an UPDATE_REQ. We want to
+ # make sure that client C sends the UPDATE_REQ to the master, rather
+ # than blindly sending to the same site that produced the VERIFY
+ # message.
+ #
+ puts "\tRep$tnum.e: start client C, with A as peer."
+ set env_C_cmd "berkdb_env_noerr -create -txn $verbargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx SITE_C \
+ -home $dirC -rep_transport \[list 3 rep082_send\]"
+ set envs(C) [eval $env_C_cmd -recover -rep_client]
+ process_msgs "{$envs(A) 1} {$envs(B) 2} {$envs(C) 3}"
+
+ $envs(C) close
+ $envs(A) close
+ $envs(B) close
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
+# We use this special-purpose wrapper send function only in the very last phase
+# of the test, and only at site C. Before that we just use the normal send
+# function as usual. Since we know exactly what sites are in what roles, we can
+# simply hard-code the EID numbers: site B (2) is the master, and site A (1) is
+# the desired target site for c2c synchronization.
+#
+proc rep082_send { control rec fromid toid flags lsn } {
+ if {$toid == 2 && [lsearch $flags "rerequest"] == -1 \
+ && [lsearch $flags "any"] != -1} {
+ set toid 1
+ }
+ replsend $control $rec $fromid $toid $flags $lsn
+}
diff --git a/db-4.8.30/test/rep083.tcl b/db-4.8.30/test/rep083.tcl
new file mode 100644
index 0000000..e4ff81b
--- /dev/null
+++ b/db-4.8.30/test/rep083.tcl
@@ -0,0 +1,161 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c)-2009 Oracle. All rights reserved.
+#
+# TEST rep083
+# TEST Replication clients must never send VERIFY_FAIL to a c2c request.
+# TEST
+# TEST Regression test for a bug [#16592] where a client could send a
+# TEST VERIFY_FAIL to another client, which is illegal.
+#
+proc rep083 { method { niter 200 } { tnum "083" } args } {
+ source ./include.tcl
+ global repfiles_in_memory
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ if { $checking_valid_methods } {
+ return "ALL"
+ }
+
+ set args [convert_args $method $args]
+
+ set msg2 "and on-disk replication files"
+ if { $repfiles_in_memory } {
+ set msg2 "and in-memory replication files"
+ }
+
+ puts "Rep$tnum: ($method)\
+ Test that client never sends VERIFY_FAIL $msg2."
+ rep083_sub $method $niter $tnum $args
+}
+
+proc rep083_sub { method niter tnum largs } {
+ global testdir
+ global util_path
+ global repfiles_in_memory
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ set repmemargs ""
+ if { $repfiles_in_memory } {
+ set repmemargs "-rep_inmem_files "
+ }
+
+ env_cleanup $testdir
+ replsetup $testdir/MSGQUEUEDIR
+
+ file mkdir [set dirA $testdir/A]
+ file mkdir [set dirB $testdir/B]
+ file mkdir [set dirC $testdir/C]
+
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_buf [expr $pagesize * 2]
+ set log_max [expr $log_buf * 4]
+
+ repladd 1
+ set env_A_cmd "berkdb_env_noerr -create -txn $verbargs $repmemargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx SITE_A \
+ -home $dirA -rep_transport \[list 1 replsend\]"
+ set envs(A) [eval $env_A_cmd -rep_master]
+
+ # Open a client
+ repladd 2
+ set env_B_cmd "berkdb_env_noerr -create -txn $verbargs $repmemargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx SITE_B \
+ -home $dirB -rep_transport \[list 2 replsend\]"
+ set envs(B) [eval $env_B_cmd -rep_client]
+
+ # Open 2nd client
+ repladd 3
+ set env_C_cmd "berkdb_env_noerr -create -txn $verbargs $repmemargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx SITE_C \
+ -home $dirC -rep_transport \[list 3 rep083_send\]"
+ set envs(C) [eval $env_C_cmd -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$envs(A) 1} {$envs(B) 2} {$envs(C) 3}"
+ process_msgs $envlist
+
+ # Run rep_test in the master (and update clients).
+ puts "\tRep$tnum.a: populate initial portion of log."
+ eval rep_test $method $envs(A) NULL $niter 0 0 0 $largs
+ process_msgs $envlist
+
+ # Take note of the initial value of "Pages received"
+ set pages_rcvd0 [stat_field $envs(C) rep_stat "Pages received"]
+
+ set res [eval exec $util_path/db_archive -l -h $dirB]
+ set last_client_log [lindex [lsort $res] end]
+
+ set stop 0
+ set start 0
+ while { $stop == 0 } {
+ # Run rep_test in the master (don't update client).
+ puts "\tRep$tnum.b: Fill log until next log file."
+ incr start $niter
+ eval rep_test $method $envs(A) NULL $niter $start $start 0 $largs
+
+ replclear 3
+ process_msgs "{$envs(A) 1} {$envs(B) 2}"
+
+ puts "\tRep$tnum.c: Run db_archive on client B."
+ exec $util_path/db_archive -d -h $dirB
+ set res [eval exec $util_path/db_archive -l -h $dirB]
+ if { [lsearch -exact $res $last_client_log] == -1} {
+ set stop 1
+ }
+ }
+
+ # At this point, client C is far behind (because we've been throwing
+ # away messages destined for it). And client B has a minimal log, because
+ # we've been aggressively archiving, but the master A has its entire log
+ # history. Therefore, upon resuming messaging to C, client C should be able
+ # to catch up without doing an internal init.
+
+ puts "\tRep$tnum.d: Write one more txn, and resume msging to C."
+ incr start $niter
+ eval rep_test $method $envs(A) NULL 1 $start $start 0 $largs
+ process_msgs $envlist
+
+ # Pause and do it one more time, to provide time for client C's
+ # time-based gap request trigger to work.
+ #
+ tclsleep 1
+ incr start 1
+ eval rep_test $method $envs(A) NULL 1 $start $start 0 $largs
+ process_msgs $envlist
+
+ # Make sure C didn't do an internal init (which we detect by testing
+ # whether it received any pages recently).
+ #
+ error_check_good no_internal_init \
+ [stat_field $envs(C) rep_stat "Pages received"] $pages_rcvd0
+ $envs(C) close
+ $envs(A) close
+ $envs(B) close
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
+# We use this special-purpose wrapper send function only at site C. Since we
+# know exactly what sites are in what roles, we can simply hard-code the EID
+# numbers: site A (1) is the master, and site B (2) is the desired target site
+# for c2c synchronization.
+#
+proc rep083_send { control rec fromid toid flags lsn } {
+ if {$toid == 1 && [lsearch $flags "rerequest"] == -1 \
+ && [lsearch $flags "any"] != -1} {
+ set toid 2
+ }
+ replsend $control $rec $fromid $toid $flags $lsn
+}
diff --git a/db-4.8.30/test/rep084.tcl b/db-4.8.30/test/rep084.tcl
new file mode 100644
index 0000000..125a3a8
--- /dev/null
+++ b/db-4.8.30/test/rep084.tcl
@@ -0,0 +1,142 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2008-2009 Oracle. All rights reserved.
+#
+# TEST rep084
+# TEST Abbreviated internal init for named in-memory databases (NIMDBs).
+# TEST
+#
+proc rep084 { method { niter 200 } { tnum "084" } args } {
+ source ./include.tcl
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # As an internal init test, run for btree and queue only.
+ # As an in-memory database test, skip queueext.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_btree $method] == 1 || [is_queue $method] == 1 } {
+ if { [is_queueext $method] == 0 } {
+ lappend test_methods $method
+ }
+ }
+ }
+ return $test_methods
+ }
+ if { [is_btree $method] != 1 && [is_queue $method] != 1 } {
+ puts "Skipping internal init test rep$tnum for method $method."
+ return
+ }
+ if { [is_queueext $method] == 1 } {
+ puts "Skipping in-memory db test rep$tnum for method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ rep084_sub $method $niter $tnum $args
+}
+
+proc rep084_sub { method niter tnum largs } {
+ global testdir
+ global util_path
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+ puts "Rep$tnum: ($method) Abbreviated internal init for NIMDBs."
+ set omethod [convert_method $method]
+
+ env_cleanup $testdir
+ replsetup $testdir/MSGQUEUEDIR
+
+ file mkdir [set dirA $testdir/A]
+ file mkdir [set dirB $testdir/B]
+ file mkdir [set dirC $testdir/C]
+
+ repladd 1
+ set env_A_cmd "berkdb_env_noerr -create -txn $verbargs \
+ -errpfx SITE_A \
+ -home $dirA -rep_transport \[list 1 replsend\]"
+ set envs(A) [eval $env_A_cmd -rep_master]
+
+ # Open two clients
+ repladd 2
+ set env_B_cmd "berkdb_env_noerr -create -txn $verbargs \
+ -errpfx SITE_B \
+ -home $dirB -rep_transport \[list 2 replsend\]"
+ set envs(B) [eval $env_B_cmd -rep_client]
+
+ repladd 3
+ set env_C_cmd "berkdb_env_noerr -create -txn $verbargs \
+ -errpfx SITE_C \
+ -home $dirC -rep_transport \[list 3 replsend\]"
+ set envs(C) [eval $env_C_cmd -rep_client]
+
+ # Bring the clients online by processing the startup messages.
+ set envlist "{$envs(A) 1} {$envs(B) 2} {$envs(C) 3}"
+ process_msgs $envlist
+
+ # Create some data in each of two databases, one a regular DB, and the
+ # other a NIMDB.
+ puts "\tRep$tnum.a: insert data."
+ set start 0
+ eval rep_test $method $envs(A) NULL $niter $start $start 0 $largs
+ set db [eval berkdb_open -env $envs(A) -auto_commit $largs \
+ -create $omethod "{}" "mynimdb"]
+ eval rep_test $method $envs(A) $db $niter $start $start 0 $largs
+ process_msgs $envlist
+
+ $db close
+ $envs(B) close
+ $envs(C) close
+
+ # Restart the clients with recovery, which causes the NIMDB to
+ # disappear. Before syncing with the master, verify that the NIMDB is
+ # gone. Verify that the NOAUTOINIT setting does not inhibit NIMDB
+ # materialization.
+ puts "\tRep$tnum.b: restart with recovery; \
+check expected database existence."
+ set envs(B) [eval $env_B_cmd -rep_client -recover]
+ set envs(C) [eval $env_C_cmd -rep_client -recover]
+ $envs(C) rep_config {noautoinit on}
+
+ [berkdb_open -env $envs(B) -auto_commit "test.db"] close
+ [berkdb_open -env $envs(C) -auto_commit "test.db"] close
+ error_check_good "NIMDB doesn't exist after recovery" \
+ [catch {berkdb_open -env $envs(B) -auto_commit "" "mynimdb"}] 1
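+ # (The open-then-immediately-close calls above are used purely as
+ # existence checks: test.db must open on both clients, while the
+ # NIMDB must not.)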
+
+ puts "\tRep$tnum.c: sync with master, NIMDB reappears."
+ set envlist "{$envs(A) 1} {$envs(B) 2} {$envs(C) 3}"
+ process_msgs $envlist
+
+ # After syncing with the master, the client should have copies of all
+ # databases.
+ #
+ [berkdb_open -env $envs(B) -auto_commit "test.db"] close
+ [berkdb_open -env $envs(B) -auto_commit "" "mynimdb"] close
+ [berkdb_open -env $envs(C) -auto_commit "test.db"] close
+ [berkdb_open -env $envs(C) -auto_commit "" "mynimdb"] close
+
+ # Run some more updates into the NIMDB at the master, and replicate them
+ # to the client, to make sure the client can apply transactions onto a
+ # NIMDB that had disappeared (and is now back).
+ #
+ incr start $niter
+ set db [berkdb_open -env $envs(A) -auto_commit "" "mynimdb"]
+ eval rep_test $method $envs(A) $db $niter $start $start 0 $largs
+ process_msgs $envlist
+ $db close
+
+ $envs(C) close
+ $envs(B) close
+ $envs(A) close
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep085.tcl b/db-4.8.30/test/rep085.tcl
new file mode 100644
index 0000000..82dfce3
--- /dev/null
+++ b/db-4.8.30/test/rep085.tcl
@@ -0,0 +1,154 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# TEST rep085
+# TEST Skipping unnecessary abbreviated internal init.
+# TEST
+# TEST Make sure that once we've materialized NIMDBs, we don't bother
+# TEST trying to do it again on subsequent sync without recovery. Make
+# TEST sure we do probe for the need to materialize NIMDBs, but don't do
+# TEST any internal init at all if there are no NIMDBs. Note that in order to
+# TEST do this test we don't even need any NIMDBs.
+
+proc rep085 { method {niter 20} {tnum 085} args } {
+ source ./include.tcl
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for btree and queue only. Since this is a NIMDB test,
+ # skip queueext.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_btree $method] == 1 || [is_queue $method] == 1 } {
+ if { [is_queueext $method] == 0 } {
+ lappend test_methods $method
+ }
+ }
+ }
+ return $test_methods
+ }
+ if { [is_btree $method] != 1 && [is_queue $method] != 1 } {
+ puts "Skipping internal init test rep$tnum for method $method."
+ return
+ }
+ if { [is_queueext $method] == 1 } {
+ puts "Skipping in-memory database test rep$tnum for method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ rep085_sub $method $niter $tnum $args
+}
+
+proc rep085_sub { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ global rep085_page_msg_count rep085_update_req_count
+
+ puts "Rep$tnum ($method): skipping unnecessary abbreviated internal init."
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ replsetup $testdir/MSGQUEUEDIR
+
+ file mkdir [set dirs(A) $testdir/SITE_A]
+ file mkdir [set dirs(B) $testdir/SITE_B]
+ file mkdir [set dirs(C) $testdir/SITE_C]
+
+ set rep085_page_msg_count 0
+ set rep085_update_req_count 0
+
+ puts "\tRep$tnum.a: Create master"
+ repladd 1
+ set env_A_cmd "berkdb_env_noerr -create -txn \
+ $verbargs \
+ -errpfx SITE_A -errfile /dev/stderr \
+ -home $dirs(A) -rep_transport \[list 1 rep085_send\]"
+ set envs(A) [eval $env_A_cmd -rep_master]
+
+ puts "\tRep$tnum.b: create (only) a regular DB"
+ set start 0
+ eval rep_test $method $envs(A) NULL $niter $start $start 0 $largs
+
+ puts "\tRep$tnum.c: Create two clients"
+ repladd 2
+ set env_B_cmd "berkdb_env_noerr -create -txn \
+ $verbargs \
+ -errpfx SITE_B -errfile /dev/stderr \
+ -home $dirs(B) -rep_transport \[list 2 rep085_send\]"
+ set envs(B) [eval $env_B_cmd -rep_client]
+
+ repladd 3
+ set env_C_cmd "berkdb_env_noerr -create -txn \
+ $verbargs \
+ -errpfx SITE_C -errfile /dev/stderr \
+ -home $dirs(C) -rep_transport \[list 3 rep085_send\]"
+ set envs(C) [eval $env_C_cmd -rep_client]
+
+ set envlist "{$envs(A) 1} {$envs(B) 2} {$envs(C) 3}"
+ process_msgs $envlist
+
+ # Note that the initial internal init that we've just done should have
+ # the effect of setting this flag. The flag indicates that any NIMDBs
+ # have been loaded, and any full internal init of course accomplishes
+ # that. If there are no NIMDBs whatsoever (which is the case here),
+ # then the condition "any NIMDBs are loaded" is trivially satisfied.
+ #
+ assert_rep_flag $dirs(C) REP_F_NIMDBS_LOADED 1
+
+ # Restart client C with recovery, which forces it to check for NIMDBs
+ # even though a full internal init is not necessary.
+ #
+ puts "\tRep$tnum.d: Bounce client C"
+ $envs(C) close
+ set envs(C) [eval $env_C_cmd -rep_client -recover]
+ assert_rep_flag $dirs(C) REP_F_NIMDBS_LOADED 0
+ set upd_before $rep085_update_req_count
+ set pg_before $rep085_page_msg_count
+ set envlist "{$envs(A) 1} {$envs(B) 2} {$envs(C) 3}"
+ process_msgs $envlist
+ error_check_good update.msg.sent \
+ $rep085_update_req_count [incr upd_before]
+ error_check_good no.page.msg $rep085_page_msg_count $pg_before
+ assert_rep_flag $dirs(C) REP_F_NIMDBS_LOADED 1
+
+ # Switch masters, forcing client C to re-sync. But this time it already
+ # knows its NIMDBs are loaded, so even an UPDATE_REQ shouldn't be
+ # necessary.
+ #
+ puts "\tRep$tnum.e: Switch master to site B"
+ $envs(A) rep_start -client
+ $envs(B) rep_start -master
+ set upd_before $rep085_update_req_count
+ set pg_before $rep085_page_msg_count
+ process_msgs $envlist
+ error_check_good no.update.msg $rep085_update_req_count $upd_before
+ error_check_good no.page.msg.2 $rep085_page_msg_count $pg_before
+
+ $envs(A) close
+ $envs(B) close
+ $envs(C) close
+ replclose $testdir/MSGQUEUEDIR
+}
+
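+ # Transport wrapper used by all three sites: it counts PAGE and
+ # UPDATE_REQ messages (via the global counters checked above) and
+ # then forwards every message through the normal replsend.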
+proc rep085_send { control rec fromid toid flags lsn } {
+ global rep085_page_msg_count rep085_update_req_count
+
+ if {[berkdb msgtype $control] eq "page"} {
+ incr rep085_page_msg_count
+ } elseif {[berkdb msgtype $control] eq "update_req"} {
+ incr rep085_update_req_count
+ }
+
+ return [replsend $control $rec $fromid $toid $flags $lsn]
+}
diff --git a/db-4.8.30/test/rep086.tcl b/db-4.8.30/test/rep086.tcl
new file mode 100644
index 0000000..9b4222d
--- /dev/null
+++ b/db-4.8.30/test/rep086.tcl
@@ -0,0 +1,146 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# TEST rep086
+# TEST Interrupted abbreviated internal init.
+# TEST
+# TEST Make sure we cleanly remove partially loaded named in-memory
+# TEST databases (NIMDBs).
+
+proc rep086 { method { tnum "086" } args } {
+
+ source ./include.tcl
+
+ # Run for btree and queue only. Since this is a NIMDB test,
+ # skip queueext.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_btree $method] == 1 || [is_queue $method] == 1 } {
+ if { [is_queueext $method] == 0 } {
+ lappend test_methods $method
+ }
+ }
+ }
+ return $test_methods
+ }
+ if { [is_btree $method] != 1 && [is_queue $method] != 1 } {
+ puts "Skipping internal init test rep$tnum for method $method."
+ return
+ }
+ if { [is_queueext $method] == 1 } {
+ puts "Skipping in-memory database test rep$tnum for method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+ rep086_sub $method $tnum $args
+}
+
+proc rep086_sub { method tnum largs } {
+
+ global testdir
+ global util_path
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+ set omethod [convert_method $method]
+
+ env_cleanup $testdir
+ replsetup $testdir/MSGQUEUEDIR
+
+ file mkdir [set dirs(A) $testdir/SITE_A]
+ file mkdir [set dirs(B) $testdir/SITE_B]
+
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_buf [expr $pagesize * 2]
+ set log_max [expr $log_buf * 4]
+
+ puts "Rep$tnum ($method): Test of interrupted abbreviated internal init."
+ puts "\tRep$tnum.a: Create master and client."
+ repladd 1
+ set env_A_cmd "berkdb_env_noerr -create -txn \
+ $verbargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx SITE_A \
+ -home $dirs(A) -rep_transport \[list 1 replsend\]"
+ set envs(A) [eval $env_A_cmd -rep_master]
+
+ # Open a client
+ repladd 2
+ set env_B_cmd "berkdb_env_noerr -create -txn \
+ $verbargs \
+ -log_buffer $log_buf -log_max $log_max -errpfx SITE_B \
+ -home $dirs(B) -rep_transport \[list 2 replsend\]"
+ set envs(B) [eval $env_B_cmd -rep_client]
+
+ set envlist "{$envs(A) 1} {$envs(B) 2}"
+ process_msgs $envlist
+
+ puts "\tRep$tnum.b: Create a regular DB and a few NIMDBs."
+ set niter 200
+ set start 0
+ eval rep_test $method $envs(A) NULL $niter 0 0 0 $largs
+ for { set i 1 } { $i <= 3 } { incr i } {
+ set nimdb [eval {berkdb_open} -env $envs(A) -auto_commit \
+ -create $largs $omethod {""} "mynimdb$i"]
+ eval rep_test $method $envs(A) \
+ $nimdb $niter $start $start 0 $largs
+ $nimdb close
+ }
+ process_msgs $envlist
+
+ puts "\tRep$tnum.c: Bounce client so it has to re-materialize the NIMDBs."
+ $envs(B) close
+ set envs(B) [eval $env_B_cmd -rep_client -recover]
+ set envlist "{$envs(A) 1} {$envs(B) 2}"
+
+ # Here's a summary reminder of the messaging that is taking place in
+ # each of the proc_msgs_once message cycles.
+ #
+ # 1. NEWCLIENT -> NEWMASTER -> VERIFY_REQ (the checkpoint written by
+ # regular recovery)
+ # 2. -> VERIFY -> (no match) VERIFY_REQ (last txn commit in common)
+ # 3. -> VERIFY -> (match, but need NIMDBS) UPDATE_REQ
+ # 4. -> UPDATE -> PAGE_REQ
+ # 5. -> PAGE -> (limited to partial NIMDB content by rep_limit)
+
+ proc_msgs_once $envlist
+ proc_msgs_once $envlist
+ proc_msgs_once $envlist
+ proc_msgs_once $envlist
+
+ # Before doing cycle # 5, set a ridiculously low limit, so that only the
+ # first page of the database will be received on this next cycle.
+ #
+ $envs(A) rep_limit 0 4
+ proc_msgs_once $envlist
+
+ # Just to make sure our test is working the way we think it should,
+ # verify that we are indeed in REP_F_RECOVER_PAGE state.
+ #
+ assert_rep_flag $dirs(B) REP_F_RECOVER_PAGE 1
+
+ # Now, with only a partial materialization of the NIMDB, downgrade the
+ # master, which should cause client to realize its internal init is
+ # interrupted.
+ #
+ $envs(A) rep_limit 0 0
+ $envs(A) rep_start -client
+ proc_msgs_once $envlist
+
+ puts "\tRep$tnum.d: Try to open NIMDBs."
+ for { set i 1 } { $i <= 3 } { incr i } {
+ set cmd [list berkdb_open -env $envs(B) -auto_commit "" "mynimdb$i"]
+ error_check_bad "open partially loaded NIMDB" [catch $cmd] 0
+ }
+
+ $envs(A) close
+ $envs(B) close
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep087.tcl b/db-4.8.30/test/rep087.tcl
new file mode 100644
index 0000000..ccd414a
--- /dev/null
+++ b/db-4.8.30/test/rep087.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# TEST rep087
+# TEST Abbreviated internal init with open file handles.
+# TEST
+# TEST Client has open handle to an on-disk DB when abbreviated
+# TEST internal init starts. Make sure we lock out access, and make sure
+# TEST it ends up as HANDLE_DEAD. Also, make sure that if there are
+# TEST no NIMDBs, that we *don't* get HANDLE_DEAD.
+
+proc rep087 { method { niter 200 } { tnum "087" } args } {
+ source ./include.tcl
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Run for btree and queue only. Since this is a NIMDB test,
+ # explicitly exclude queueext.
+ if { $checking_valid_methods } {
+ set test_methods {}
+ foreach method $valid_methods {
+ if { [is_btree $method] == 1 || [is_queue $method] == 1 } {
+ if { [is_queueext $method] == 0 } {
+ lappend test_methods $method
+ }
+ }
+ }
+ return $test_methods
+ }
+ if { [is_btree $method] != 1 && [is_queue $method] != 1 } {
+ puts "Skipping internal init test rep$tnum for method $method."
+ return
+ }
+ if { [is_queueext $method] == 1 } {
+ puts "Skipping in-memory database test rep$tnum for method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ # Run test with and without a NIMDB present.
+ rep087_sub $method $niter $tnum "true" $args
+ rep087_sub $method $niter $tnum "false" $args
+}
+
+proc rep087_sub { method niter tnum with_nimdb largs } {
+ global testdir
+ global util_path
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+ if { $with_nimdb} {
+ set msg "with"
+ } else {
+ set msg "without"
+ }
+ puts "Rep$tnum ($method):\
+ Abbreviated internal init and dead handles, $msg NIMDB."
+ if { $niter < 3 } {
+ set niter 3
+ puts "\tRep$tnum: the minimum 'niter' value is 3."
+ }
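+ # (Three is the number of records the cursor walk below reads when
+ # saving a key for the later "$db get" checks.)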
+
+ set omethod [convert_method $method]
+
+ env_cleanup $testdir
+ replsetup $testdir/MSGQUEUEDIR
+
+ file mkdir [set dirs(A) $testdir/SITE_A]
+ file mkdir [set dirs(B) $testdir/SITE_B]
+
+ puts "\tRep$tnum: Create master and client"
+ repladd 1
+ set env_A_cmd "berkdb_env_noerr -create -txn \
+ $verbargs \
+ -errpfx SITE_A \
+ -home $dirs(A) -rep_transport \[list 1 replsend\]"
+ set envs(A) [eval $env_A_cmd -rep_master]
+
+ # Open a client
+ repladd 2
+ set env_B_cmd "berkdb_env_noerr -create -txn \
+ $verbargs \
+ -errpfx SITE_B \
+ -home $dirs(B) -rep_transport \[list 2 replsend\]"
+ set envs(B) [eval $env_B_cmd -rep_client]
+
+ set envlist "{$envs(A) 1} {$envs(B) 2}"
+ process_msgs $envlist
+
+ if { $with_nimdb } {
+ set msg "and a NIMDB"
+ } else {
+ set msg ""
+ }
+ puts "\tRep$tnum: Create a regular DB $msg"
+ set start 0
+ eval rep_test $method $envs(A) NULL $niter $start $start 0 $largs
+
+ if { $with_nimdb } {
+ set nimdb [eval {berkdb_open} -env $envs(A) -auto_commit \
+ -create $largs $omethod {"" "mynimdb"}]
+ eval rep_test $method $envs(A) \
+ $nimdb $niter $start $start 0 $largs
+ $nimdb close
+ }
+ process_msgs $envlist
+
+ puts "\tRep$tnum: Restart client with recovery"
+ #
+ # In the NIMDB case, this forces the rematerialization of the NIMDB.
+ #
+ $envs(B) close
+ set envs(B) [eval $env_B_cmd -rep_client -recover]
+ set envlist "{$envs(A) 1} {$envs(B) 2}"
+
+ # Before seeking the master, open a DB handle onto the regular DB.
+ # At this point, we should be allowed to read it.
+ #
+ # Try reading a few records. (How many? We arbitrarily choose to try
+ # reading three.) Save one of the keys so that we can use it later in a
+ # "$db get" call. (Superstitiously skip over the first key, in deciding
+ # which one to save, because it is usually a zero-length string.)
+ #
+ set db [berkdb_open_noerr -env $envs(B) -auto_commit test.db]
+ set c [$db cursor]
+ $c get -next
+ set pairs [$c get -next]
+ set a_key [lindex $pairs 0 0]
+ $c get -next
+ $c close
+
+ if { $with_nimdb} {
+ # At this point, the NIMDB is obviously not available, since it
+ # was blown away by the recovery/recreation of regions. Let's
+ # just make sure.
+ #
+ error_check_bad no_nimdb \
+ [catch {berkdb_open_noerr -env $envs(B) \
+ -auto_commit "" "mynimdb"}] 0
+
+ # Use the usual idiom of processing just one message cycle at a
+ # time, so that we can check access during the middle of
+ # internal init. (If no NIMDB, there is no internal init, so
+ # there's no point in doing this for that case.)
+
+ # 1. NEWCLIENT -> NEWMASTER -> VERIFY_REQ (the checkpoint
+ # written by regular recovery)
+ # 2. -> VERIFY -> (no match) VERIFY_REQ (last txn commit in
+ # common)
+ # 3. -> VERIFY -> (match, but need NIMDBS) UPDATE_REQ
+ # 4. -> UPDATE -> PAGE_REQ
+ # 5. -> PAGE -> (limited to partial NIMDB content by
+ # rep_limit)
+
+ proc_msgs_once $envlist
+ proc_msgs_once $envlist
+ proc_msgs_once $envlist
+ proc_msgs_once $envlist
+
+ # Before doing cycle # 5, set a ridiculously low limit, so that
+ # only the first page of the database will be received on this
+ # next cycle.
+ #
+ $envs(A) rep_limit 0 4
+ proc_msgs_once $envlist
+
+ # Now we should be blocked from reading from our DB.
+ puts "\tRep$tnum: Try blocked access (5 second delay)."
+ error_check_bad should_block [catch {$db get $a_key} ret] 0
+ error_check_good deadlock [is_substr $ret DB_LOCK_DEADLOCK] 1
+
+ # Get rid of any limit for the remainder of the test.
+ #
+ $envs(A) rep_limit 0 0
+ }
+
+ # Finish off all pending message processing.
+ #
+ process_msgs $envlist
+
+ if { $with_nimdb } {
+ # We should of course be able to open, and read a few
+ # records from, the NIMDB, now that we've completed the
+ # abbreviated internal init.
+ #
+ set imdb [berkdb_open_noerr -env $envs(B) \
+ -auto_commit "" "mynimdb"]
+ set c [$imdb cursor]
+ $c get -next
+ $c get -next
+ $c get -next
+ $c close
+ $imdb close
+
+ puts "\tRep$tnum: Try access to dead handle."
+ error_check_bad handle_dead [catch {$db get $a_key} ret] 0
+ error_check_good $ret [is_substr $ret DB_REP_HANDLE_DEAD] 1
+
+ $db close
+ set db [berkdb_open_noerr -env $envs(B) -auto_commit test.db]
+ error_check_good reaccess_ok [catch {$db get $a_key} ret] 0
+ } else {
+ puts "\tRep$tnum: Try access to still-valid handle"
+ error_check_good access_ok [catch {$db get $a_key} ret] 0
+ }
+
+ puts "\tRep$tnum: Clean up."
+ $db close
+ $envs(A) close
+ $envs(B) close
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/rep088.tcl b/db-4.8.30/test/rep088.tcl
new file mode 100644
index 0000000..5cd2ad9
--- /dev/null
+++ b/db-4.8.30/test/rep088.tcl
@@ -0,0 +1,248 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# TEST rep088
+# TEST Replication roll-back preserves checkpoint.
+# TEST
+# TEST Create a situation where a client has to roll back its
+# TEST log, discarding some existing transactions, in order to sync
+# TEST with a new master.
+# TEST
+# TEST 1. When the client still has its entire log file history, all
+# TEST the way back to log file #1, it's OK if the roll-back discards
+# TEST any/all checkpoints.
+# TEST 2. When old log files have been archived, if the roll-back would
+# TEST remove all existing checkpoints it must be forbidden. The log
+# TEST must always have a checkpoint (or all files back through #1).
+# TEST The client must do internal init or return JOIN_FAILURE.
+# TEST 3. (the normal case) Old log files archived, and a checkpoint
+# TEST still exists in the portion of the log which will remain after
+# TEST the roll-back: no internal-init/JOIN_FAILURE necessary.
+#
+# TODO: maybe just reject anything that doesn't comply with my simplified
+# rep_test clone, like fixed-length record methods, etc.
+
+proc rep088 { method { niter 20 } { tnum 088 } args } {
+ source ./include.tcl
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ # Run for btree only.
+ if { $checking_valid_methods } {
+ set test_methods { btree }
+ return $test_methods
+ }
+ if { [is_btree $method] == 0 } {
+ puts "\tRep$tnum: Skipping for method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Rep$tnum ($method): Replication roll-back preserves checkpoint."
+ # Note: expected result = "sync" means the client should be allowed to
+ # synchronize normally (to the found sync point), without any need for
+ # internal init.
+ #
+ # Case #1.
+ puts "Rep$tnum: Rollback without checkpoint, with log file 1"
+ set archive false
+ set ckpt false
+ set result sync
+ rep088_sub $method $niter $tnum $archive $ckpt $result $args
+
+ # Case #2.(a).
+ #
+	puts "Rep$tnum: Forbid rollback over the only checkpoint: join failure"
+ set archive true
+ set ckpt false
+ set result join_failure
+ rep088_sub $method $niter $tnum $archive $ckpt $result $args
+
+ # Case #2.(b): essentially the same, but allow the internal init to
+ # happen, so that we verify that the subsequent restart with recovery
+ # works fine. NB: this is the obvious failure case prior to bug fix
+ # #16732.
+ #
+	puts "Rep$tnum: Forbid rollback over the only checkpoint: internal init"
+ set archive true
+ set ckpt false
+ set result internal_init
+ rep088_sub $method $niter $tnum $archive $ckpt $result $args
+
+ # Case #3.
+ puts "Rep$tnum: Rollback with sufficient extra checkpoints"
+ set archive true
+ set ckpt true
+ set result sync
+ rep088_sub $method $niter $tnum $archive $ckpt $result $args
+}
+
+proc rep088_sub { method niter tnum archive ckpt result largs } {
+ source ./include.tcl
+ global testdir
+ global util_path
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ replsetup $testdir/MSGQUEUEDIR
+
+ file mkdir [set dirA $testdir/A]
+ file mkdir [set dirB $testdir/B]
+ file mkdir [set dirC $testdir/C]
+
+ set pagesize 4096
+ append largs " -pagesize $pagesize "
+ set log_buf [expr $pagesize * 2]
+ set log_max [expr $log_buf * 4]
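+	# With a 4KB page this gives an 8KB log buffer and 32KB log files,
+	# so a handful of transactions is enough to roll over a log file.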
+
+ puts "\tRep$tnum.a: Create master and two clients"
+ repladd 1
+ set env_A_cmd "berkdb_env -create -txn $verbargs \
+ -log_buffer $log_buf -log_max $log_max \
+ -errpfx SITE_A -errfile /dev/stderr \
+ -home $dirA -rep_transport \[list 1 replsend\]"
+ set envs(A) [eval $env_A_cmd -rep_master]
+
+ repladd 2
+ set env_B_cmd "berkdb_env -create -txn $verbargs \
+ -log_buffer $log_buf -log_max $log_max \
+ -errpfx SITE_B -errfile /dev/stderr \
+ -home $dirB -rep_transport \[list 2 replsend\]"
+ set envs(B) [eval $env_B_cmd -rep_client]
+
+ repladd 3
+ set env_C_cmd "berkdb_env -create -txn $verbargs \
+ -log_buffer $log_buf -log_max $log_max \
+ -errpfx SITE_C -errfile /dev/stderr \
+ -home $dirC -rep_transport \[list 3 replsend\]"
+ set envs(C) [eval $env_C_cmd -rep_client]
+
+ set envlist "{$envs(A) 1} {$envs(B) 2} {$envs(C) 3}"
+ process_msgs $envlist
+ $envs(A) test force noarchive_timeout
+
+ # Using small log file size, push into the second log file.
+ #
+ puts "\tRep$tnum.b: Write enough txns to exceed 1 log file"
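+	# (lsn_file extracts the file number from an LSN, so loop until
+	# client C's next expected LSN lands in log file 2.)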
+ while { [lsn_file [next_expected_lsn $envs(C)]] == 1 } {
+ eval rep088_reptest $method $envs(A) $niter $largs
+ process_msgs $envlist
+ }
+
+ # To make sure everything still works in the normal case, put in a
+ # checkpoint here before writing the transactions that will have to be
+ # rolled back. Later, when the client sees that it must roll back over
+ # (and discard) the later checkpoint, the fact that this checkpoint is
+ # here will allow it to proceed.
+ #
+ if { $ckpt } {
+ puts "\tRep$tnum.c: put in an 'extra' checkpoint."
+ $envs(A) txn_checkpoint
+ process_msgs $envlist
+ }
+
+	# Turn off the to-be-master (TBM) client; it becomes master later.
+ #
+ puts "\tRep$tnum.d: Turn off client B and write more txns"
+ $envs(B) close
+ set envlist "{$envs(A) 1} {$envs(C) 3}"
+
+ # Fill a bit more log, and then write a checkpoint.
+ #
+ eval rep088_reptest $method $envs(A) $niter $largs
+ $envs(A) txn_checkpoint
+ replclear 2
+ process_msgs $envlist
+
+ # At the client under test, archive away the first log file.
+ #
+ if { $archive } {
+ puts "\tRep$tnum.e: Archive log at client C"
+ exec $util_path/db_archive -d -h $dirC
+ }
+
+	# Another cycle of filling and checkpointing.
+ #
+ eval rep088_reptest $method $envs(A) $niter $largs
+ $envs(A) txn_checkpoint
+ replclear 2
+ process_msgs $envlist
+
+	# Now turn off the master, and turn on the TBM site as master. The
+	# client under test has to sync with the new master. To make the
+	# expected behavior explicit, turn off auto-init except in the case
+	# where we want internal init to happen.
+	#
+
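+	# (With noautoinit on, a client that would need internal init
+	# returns DB_REP_JOIN_FAILURE instead of silently rebuilding.)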
+ if { $result != "internal_init" } {
+ $envs(C) rep_config {noautoinit on}
+ }
+ puts "\tRep$tnum.f: Switch master to site B, try to sync client C"
+ $envs(A) close
+ set envs(B) [eval $env_B_cmd -rep_master]
+ set envlist "{$envs(B) 2} {$envs(C) 3}"
+ replclear 1
+ set succeeded [catch { process_msgs $envlist } ret]
+
+ switch $result {
+ internal_init {
+ error_check_good inited $succeeded 0
+
+ # Now stop the client, and try restarting it with
+ # recovery.
+ #
+ $envs(C) close
+ set envs(C) [eval $env_C_cmd -rep_client -recover]
+ }
+ join_failure {
+ error_check_bad no_autoinit $succeeded 0
+ error_check_good join_fail \
+ [is_substr $ret DB_REP_JOIN_FAILURE] 1
+ }
+ sync {
+ error_check_good sync_ok $succeeded 0
+ error_check_good not_outdated \
+ [stat_field $envs(C) rep_stat \
+ "Outdated conditions"] 0
+ }
+ default {
+ error "FAIL: unknown test result option $result"
+ }
+ }
+
+ $envs(C) close
+ $envs(B) close
+ replclose $testdir/MSGQUEUEDIR
+}
+
+# A simplified clone of proc rep_test, with the crucial distinction that it
+# doesn't do any of its own checkpointing. For this test we need explicit
+# control of when checkpoints should happen. This proc doesn't support
+# access methods using record numbers.
+proc rep088_reptest { method env niter args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set largs [convert_args $method $args]
+ set db [eval berkdb_open_noerr -env $env -auto_commit \
+ -create $omethod $largs test.db]
+
+ set did [open $dict]
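+	# Use dictionary words as keys and their reversed strings as data.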
+ for { set i 0 } { $i < $niter && [gets $did str] >= 0 } { incr i } {
+ set key $str
+ set str [reverse $str]
+ $db put $key $str
+ }
+ close $did
+ $db close
+}
diff --git a/db-4.8.30/test/repmgr001.tcl b/db-4.8.30/test/repmgr001.tcl
new file mode 100644
index 0000000..5b2f6c5
--- /dev/null
+++ b/db-4.8.30/test/repmgr001.tcl
@@ -0,0 +1,43 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr001
+# TEST Basic repmgr test.
+# TEST
+# TEST Create an appointed master and two clients, process some records and
+# TEST verify resulting databases.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr001 { method { niter 100 } { tnum "001" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): Basic repmgr test."
+ basic_repmgr_test $method $niter $tnum 0 0 0 0 0 $args
+}
+
diff --git a/db-4.8.30/test/repmgr002.tcl b/db-4.8.30/test/repmgr002.tcl
new file mode 100644
index 0000000..62c7904
--- /dev/null
+++ b/db-4.8.30/test/repmgr002.tcl
@@ -0,0 +1,44 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr002
+# TEST Basic repmgr election test.
+# TEST
+# TEST Open three clients of different priorities and make sure repmgr
+# TEST elects expected master. Shut master down, make sure repmgr elects
+# TEST expected remaining client master, make sure former master can join
+# TEST as client.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr002 { method { niter 100 } { tnum "002" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): Basic repmgr election test."
+ basic_repmgr_election_test $method $niter $tnum 0 $args
+}
diff --git a/db-4.8.30/test/repmgr003.tcl b/db-4.8.30/test/repmgr003.tcl
new file mode 100644
index 0000000..126604a
--- /dev/null
+++ b/db-4.8.30/test/repmgr003.tcl
@@ -0,0 +1,43 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr003
+# TEST Basic repmgr internal init test.
+# TEST
+# TEST Start an appointed master site and two clients, processing
+# TEST transactions between each additional site. Verify all expected
+# TEST transactions are replicated.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr003 { method { niter 100 } { tnum "003" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): Basic repmgr internal init test."
+ basic_repmgr_init_test $method $niter $tnum 0 $args
+}
diff --git a/db-4.8.30/test/repmgr004.tcl b/db-4.8.30/test/repmgr004.tcl
new file mode 100644
index 0000000..4c8cc64
--- /dev/null
+++ b/db-4.8.30/test/repmgr004.tcl
@@ -0,0 +1,43 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr004
+# TEST Basic repmgr test with in-memory logs.
+# TEST
+# TEST Create an appointed master and two clients, process some records and
+# TEST verify resulting databases.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr004 { method { niter 100 } { tnum "004" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): Basic repmgr test in-memory logs."
+ basic_repmgr_test $method $niter $tnum 0 1 0 0 0 $args
+}
+
diff --git a/db-4.8.30/test/repmgr005.tcl b/db-4.8.30/test/repmgr005.tcl
new file mode 100644
index 0000000..e84db7c
--- /dev/null
+++ b/db-4.8.30/test/repmgr005.tcl
@@ -0,0 +1,43 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr005
+# TEST Basic repmgr test with in-memory databases.
+# TEST
+# TEST Create an appointed master and two clients, process some records and
+# TEST verify resulting databases.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr005 { method { niter 100 } { tnum "005" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): Basic repmgr test in-memory databases."
+ basic_repmgr_test $method $niter $tnum 1 0 0 0 0 $args
+}
+
diff --git a/db-4.8.30/test/repmgr006.tcl b/db-4.8.30/test/repmgr006.tcl
new file mode 100644
index 0000000..d00a052
--- /dev/null
+++ b/db-4.8.30/test/repmgr006.tcl
@@ -0,0 +1,43 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr006
+# TEST Basic repmgr test with bulk processing.
+# TEST
+# TEST Create an appointed master and two clients, process some records and
+# TEST verify resulting databases.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr006 { method { niter 1000 } { tnum "006" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): Basic repmgr test bulk processing."
+ basic_repmgr_test $method $niter $tnum 0 0 0 1 0 $args
+}
+
diff --git a/db-4.8.30/test/repmgr007.tcl b/db-4.8.30/test/repmgr007.tcl
new file mode 100644
index 0000000..7f95b9c
--- /dev/null
+++ b/db-4.8.30/test/repmgr007.tcl
@@ -0,0 +1,160 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr007
+# TEST Basic repmgr client shutdown/restart test.
+# TEST
+# TEST Start an appointed master site and two clients. Shutdown and
+# TEST restart each client, processing transactions after each restart.
+# TEST Verify all expected transactions are replicated.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr007 { method { niter 100 } { tnum "007" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): repmgr client shutdown/restart test."
+ repmgr007_sub $method $niter $tnum $args
+}
+
+proc repmgr007_sub { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ set nsites 3
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ # Use different connection retry timeout values to handle any
+ # collisions from starting sites at the same time by retrying
+ # at different times.
+
+ # Open a master.
+ puts "\tRepmgr$tnum.a: Start a master."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -start master
+
+ # Open first client
+ puts "\tRepmgr$tnum.b: Start first client."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 2]] \
+ -start client
+ await_startup_done $clientenv
+
+ # Open second client
+ puts "\tRepmgr$tnum.c: Start second client."
+ set cl2_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT2 -home $clientdir2 -txn -rep -thread"
+ set clientenv2 [eval $cl2_envcmd]
+ $clientenv2 repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 5000000} \
+ -local [list localhost [lindex $ports 2]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 1]] \
+ -start client
+ await_startup_done $clientenv2
+
+ #
+	# Use of -ack all guarantees that replication is complete before the
+	# repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.d: Run first set of transactions at master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.e: Shut down first client, wait and restart it."
+ error_check_good client_close [$clientenv close] 0
+ tclsleep 5
+ # Open -recover to clear env region, including startup_done value.
+ set clientenv [eval $cl_envcmd -recover]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 2]] \
+ -start client
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.f: Run second set of transactions at master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.g: Verifying client database contents."
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+ rep_verify $masterdir $masterenv $clientdir2 $clientenv2 1 1 1
+
+ puts "\tRepmgr$tnum.h: Shut down second client, wait and restart it."
+ error_check_good client_close [$clientenv2 close] 0
+ tclsleep 5
+ # Open -recover to clear env region, including startup_done value.
+ set clientenv2 [eval $cl2_envcmd -recover]
+ $clientenv2 repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 5000000} \
+ -local [list localhost [lindex $ports 2]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 1]] \
+ -start client
+ await_startup_done $clientenv2
+
+ puts "\tRepmgr$tnum.i: Run third set of transactions at master."
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+
+ puts "\tRepmgr$tnum.j: Verifying client database contents."
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+ rep_verify $masterdir $masterenv $clientdir2 $clientenv2 1 1 1
+
+ error_check_good client2_close [$clientenv2 close] 0
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
diff --git a/db-4.8.30/test/repmgr008.tcl b/db-4.8.30/test/repmgr008.tcl
new file mode 100644
index 0000000..83eb102
--- /dev/null
+++ b/db-4.8.30/test/repmgr008.tcl
@@ -0,0 +1,43 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr008
+# TEST Basic repmgr test with client-to-client configuration.
+# TEST
+# TEST Create a master and two clients, process some records and verify
+# TEST resulting databases.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr008 { method { niter 100 } { tnum "008" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): Basic repmgr test client-to-client."
+ basic_repmgr_test $method $niter $tnum 0 0 1 0 0 $args
+}
+
diff --git a/db-4.8.30/test/repmgr009.tcl b/db-4.8.30/test/repmgr009.tcl
new file mode 100644
index 0000000..f20d1fe
--- /dev/null
+++ b/db-4.8.30/test/repmgr009.tcl
@@ -0,0 +1,184 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr009
+# TEST repmgr API error test.
+# TEST
+# TEST Try a variety of repmgr calls that result in errors. Also
+# TEST try combinations of repmgr and base replication API calls
+# TEST that result in errors.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr009 { method { niter 10 } { tnum "009" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): repmgr API error test."
+ repmgr009_sub $method $niter $tnum $args
+}
+
+proc repmgr009_sub { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ set nsites 2
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports [expr $nsites * 5]]
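+	# Allocate extra ports; several are used only as addresses in the
+	# deliberately failing repmgr calls below.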
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set masterdir2 $testdir/MASTERDIR2
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $masterdir2
+ file mkdir $clientdir
+
+ # Use different connection retry timeout values to handle any
+ # collisions from starting sites at the same time by retrying
+ # at different times.
+
+ puts "\tRepmgr$tnum.a: Set up environment without repmgr."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ error_check_good masterenv_close [$masterenv close] 0
+
+ puts "\tRepmgr$tnum.b: Call repmgr without open master (error)."
+ catch {$masterenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -start master} res
+ error_check_good errchk [is_substr $res "invalid command"] 1
+
+ puts "\tRepmgr$tnum.c: Call repmgr_stat without open master (error)."
+ catch {[stat_field $masterenv repmgr_stat "Connections dropped"]} res
+ error_check_good errchk [is_substr $res "invalid command"] 1
+
+ puts "\tRepmgr$tnum.d: Start a master with repmgr."
+ repladd 1
+ set masterenv [eval $ma_envcmd]
+ $masterenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -start master
+
+ puts "\tRepmgr$tnum.e: Start repmgr with no local sites (error)."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ catch {$clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -remote [list localhost [lindex $ports 7]] \
+ -start client} res
+ error_check_good errchk [is_substr $res \
+ "set_local_site must be called before repmgr_start"] 1
+ error_check_good client_close [$clientenv close] 0
+
+ puts "\tRepmgr$tnum.f: Start repmgr with two local sites (error)."
+ set clientenv [eval $cl_envcmd]
+ catch {$clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 8]] \
+ -local [list localhost [lindex $ports 9]] \
+ -start client} res
+ error_check_good errchk [string match "*already*set*" $res] 1
+ error_check_good client_close [$clientenv close] 0
+
+ puts "\tRepmgr$tnum.g: Start a client."
+ repladd 2
+ set clientenv [eval $cl_envcmd -recover]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -start client
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.h: Start repmgr a second time (error)."
+ catch {$clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -start client} res
+ error_check_good errchk [is_substr $res "must be called before"] 1
+
+ puts "\tRepmgr$tnum.i: Call rep_start after starting repmgr (error)."
+ catch {set clientenv [eval $cl_envcmd -rep_client -rep_transport \
+ \[list 2 replsend\]]} res
+ error_check_good errchk [is_substr $res \
+ "type mismatch for a replication process"] 1
+
+ puts "\tRepmgr$tnum.j: Call rep_process_message (error)."
+ set envlist "{$masterenv 1} {$clientenv 2}"
+ catch {$clientenv rep_process_message 0 0 0} res
+ error_check_good errchk [is_substr $res \
+ "cannot call from Replication Manager application"] 1
+
+ #
+	# Use of -ack all guarantees that replication is complete before the
+	# repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.k: Run some transactions at master."
+ eval rep_test $method $masterenv NULL $niter $niter 0 0 $largs
+
+ puts "\tRepmgr$tnum.l: Call rep_elect (error)."
+ catch {$clientenv rep_elect 2 2 2 5000000} res
+ error_check_good errchk [is_substr $res \
+ "cannot call from Replication Manager application"] 1
+
+ puts "\tRepmgr$tnum.m: Verifying client database contents."
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+
+ puts "\tRepmgr$tnum.n: Start a master with base API rep_start."
+ set ma_envcmd2 "berkdb_env_noerr -create $verbargs \
+ -home $masterdir2 -errpfx MASTER -txn -thread -rep_master \
+ -rep_transport \[list 1 replsend\]"
+ set masterenv2 [eval $ma_envcmd2]
+
+ puts "\tRepmgr$tnum.o: Call repmgr after rep_start (error)."
+ catch {$masterenv2 repmgr -ack all -nsites $nsites \
+ -local [list localhost [lindex $ports 0]] \
+ -start master} res
+	# Internal repmgr calls return EINVAL after hitting the
+	# base API application check.
+ error_check_good errchk [is_substr $res "invalid argument"] 1
+
+ error_check_good masterenv_close [$masterenv2 close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/db-4.8.30/test/repmgr010.tcl b/db-4.8.30/test/repmgr010.tcl
new file mode 100644
index 0000000..7cc0b24
--- /dev/null
+++ b/db-4.8.30/test/repmgr010.tcl
@@ -0,0 +1,181 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr010
+# TEST Acknowledgement policy and timeout test.
+# TEST
+# TEST Verify that "quorum" acknowledgement policy succeeds with fewer than
+# TEST nsites running. Verify that "all" acknowledgement policy results in
+# TEST ack failures with fewer than nsites running.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr010 { method { niter 100 } { tnum "010" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): repmgr ack policy and timeout test."
+ repmgr010_sub $method $niter $tnum $args
+}
+
+proc repmgr010_sub { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ set nsites 3
+
+ set small_iter [expr $niter / 10]
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ # Use different connection retry timeout values to handle any
+ # collisions from starting sites at the same time by retrying
+ # at different times.
+
+ puts "\tRepmgr$tnum.a: Start master, two clients, ack policy quorum."
+ # Open a master.
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv repmgr -ack quorum -nsites $nsites \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -start master
+
+ # Open first client
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv repmgr -ack quorum -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 2]] \
+ -start client
+ await_startup_done $clientenv
+
+ # Open second client
+ set cl2_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT2 -home $clientdir2 -txn -rep -thread"
+ set clientenv2 [eval $cl2_envcmd]
+ $clientenv2 repmgr -ack quorum -nsites $nsites \
+ -timeout {conn_retry 5000000} \
+ -local [list localhost [lindex $ports 2]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 1]] \
+ -start client
+ await_startup_done $clientenv2
+
+ puts "\tRepmgr$tnum.b: Run first set of transactions at master."
+ set start 0
+ eval rep_test $method $masterenv NULL $niter $start 0 0 $largs
+ incr start $niter
+
+ #
+	# Special verification needed for quorum ack policy. Wait
+	# longer than the ack timeout (default 1 second), then check for
+	# ack failures (perm_failed events). Quorum only guarantees
+	# that transactions were replicated to one site or the other, so
+	# test for that condition instead of requiring both sites.
+ #
+ puts "\tRepmgr$tnum.c: Verify both client databases, no ack failures."
+ tclsleep 5
+ error_check_good quorum_perm_failed1 \
+ [stat_field $masterenv repmgr_stat "Acknowledgement failures"] 0
+ catch {rep_verify\
+ $masterdir $masterenv $clientdir $clientenv 1 1 1} ver1
+ catch {rep_verify\
+ $masterdir $masterenv $clientdir2 $clientenv2 1 1 1} ver2
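+	# rep_verify throws an error on a mismatch, so an empty catch result
+	# means that client verified cleanly; quorum requires at least one.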
+ error_check_good onesite [expr [string length $ver1] == 0 || \
+ [string length $ver2] == 0] 1
+
+ puts "\tRepmgr$tnum.d: Shut down first client."
+ error_check_good client_close [$clientenv close] 0
+
+ puts "\tRepmgr$tnum.e: Run second set of transactions at master."
+ eval rep_test $method $masterenv NULL $small_iter $start 0 0 $largs
+ incr start $niter
+
+ puts "\tRepmgr$tnum.f: Verify client database, no ack failures."
+ tclsleep 5
+ error_check_good quorum_perm_failed2 \
+ [stat_field $masterenv repmgr_stat "Acknowledgement failures"] 0
+ rep_verify $masterdir $masterenv $clientdir2 $clientenv2 1 1 1
+
+ puts "\tRepmgr$tnum.g: Adjust all sites to ack policy all."
+ # Reopen first client with ack policy all
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ # Open -recover to clear env region, including startup_done value.
+ set clientenv [eval $cl_envcmd -recover]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 2]] \
+ -start client
+ await_startup_done $clientenv
+
+ # Adjust other sites to ack policy all
+ $masterenv repmgr -ack all
+ $clientenv2 repmgr -ack all
+
+ puts "\tRepmgr$tnum.h: Shut down first client."
+ error_check_good client_close [$clientenv close] 0
+ set init_perm_failed \
+ [stat_field $masterenv repmgr_stat "Acknowledgement failures"]
+
+ #
+	# Use of -ack all guarantees that replication is complete before the
+	# repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.i: Run third set of transactions at master."
+ eval rep_test $method $masterenv NULL $small_iter $start 0 0 $largs
+
+ puts "\tRepmgr$tnum.j: Verify client database, some ack failures."
+ rep_verify $masterdir $masterenv $clientdir2 $clientenv2 1 1 1
+ error_check_good all_perm_failed [expr \
+ [stat_field $masterenv repmgr_stat "Acknowledgement failures"] \
+ > $init_perm_failed] 1
+
+ error_check_good client2_close [$clientenv2 close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
diff --git a/db-4.8.30/test/repmgr011.tcl b/db-4.8.30/test/repmgr011.tcl
new file mode 100644
index 0000000..4606922
--- /dev/null
+++ b/db-4.8.30/test/repmgr011.tcl
@@ -0,0 +1,129 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr011
+# TEST repmgr two site strict majority test.
+# TEST
+# TEST Start an appointed master and one client with 2 site strict
+# TEST majority set. Shut down the master site, wait and verify that
+# TEST the client site was not elected master. Start up master site
+# TEST and verify that transactions are processed as expected.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr011 { method { niter 100 } { tnum "011" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): repmgr two site strict majority test."
+ repmgr011_sub $method $niter $tnum $args
+}
+
+proc repmgr011_sub { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ set nsites 2
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ # Use different connection retry timeout values to handle any
+ # collisions from starting sites at the same time by retrying
+ # at different times.
+
+ # Open first client as master and set 2site_strict.
+ puts "\tRepmgr$tnum.a: Start first client as master."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -start master
+ error_check_good c1strict [$clientenv rep_config {mgr2sitestrict on}] 0
+
+ # Open second client and set 2site_strict.
+ puts "\tRepmgr$tnum.b: Start second client."
+ set cl2_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT2 -home $clientdir2 -txn -rep -thread"
+ set clientenv2 [eval $cl2_envcmd]
+ $clientenv2 repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -start client
+ await_startup_done $clientenv2
+ error_check_good c2strict [$clientenv2 rep_config \
+ {mgr2sitestrict on}] 0
+
+ #
+	# Use of -ack all guarantees that replication is complete before the
+	# repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.c: Run first set of transactions at master."
+ eval rep_test $method $clientenv NULL $niter 0 0 0 $largs
+
+ puts "\tRepmgr$tnum.d: Verifying client database contents."
+ rep_verify $clientdir $clientenv $clientdir2 $clientenv2 1 1 1
+
+ puts "\tRepmgr$tnum.e: Shut down first client (current master)."
+ error_check_good client_close [$clientenv close] 0
+
+ puts "\tRepmgr$tnum.f: Wait, then verify no master."
+ tclsleep 20
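+	# With mgr2sitestrict on, the surviving site cannot elect itself
+	# master on its own single vote.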
+ error_check_bad c2_master [stat_field $clientenv2 rep_stat "Master"] 1
+
+ puts "\tRepmgr$tnum.g: Restart first client as master"
+ set clientenv [eval $cl_envcmd]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 1]] \
+ -start master
+ await_expected_master $clientenv
+
+ puts "\tRepmgr$tnum.h: Run second set of transactions at master."
+ eval rep_test $method $clientenv NULL $niter $niter 0 0 $largs
+
+ puts "\tRepmgr$tnum.i: Verifying client database contents."
+ rep_verify $clientdir $clientenv $clientdir2 $clientenv2 1 1 1
+
+ error_check_good client2_close [$clientenv2 close] 0
+ error_check_good client_close [$clientenv close] 0
+}
diff --git a/db-4.8.30/test/repmgr012.tcl b/db-4.8.30/test/repmgr012.tcl
new file mode 100644
index 0000000..dfaf40d
--- /dev/null
+++ b/db-4.8.30/test/repmgr012.tcl
@@ -0,0 +1,136 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr012
+# TEST repmgr heartbeat test.
+# TEST
+# TEST Start an appointed master and one client site. Set heartbeat
+# TEST send and monitor values and process some transactions. Stop
+# TEST sending heartbeats from master and verify that client sees
+# TEST a dropped connection.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr012 { method { niter 100 } { tnum "012" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): repmgr heartbeat test."
+ repmgr012_sub $method $niter $tnum $args
+}
+
+proc repmgr012_sub { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ set nsites 2
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Use different connection retry timeout values to handle any
+ # collisions from starting sites at the same time by retrying
+ # at different times.
+
+ # Open a master.
+ puts "\tRepmgr$tnum.a: Start a master."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -start master
+
+ # Open a client.
+ puts "\tRepmgr$tnum.b: Start a client."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -start client
+ await_startup_done $clientenv
+
+ #
+	# Use of -ack all guarantees that replication is complete before the
+	# repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.c: Run first set of transactions at master."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+
+ puts "\tRepmgr$tnum.d: Verifying client database contents."
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+	# Timeouts are in microseconds; the heartbeat_monitor timeout
+	# should be longer than heartbeat_send.
+ puts "\tRepmgr$tnum.e: Set heartbeat timeouts."
+ $masterenv repmgr -timeout {heartbeat_send 5000000}
+ $clientenv repmgr -timeout {heartbeat_monitor 10000000}
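+	# (5 second send interval against a 10 second monitor window, so
+	# the client tolerates one missed heartbeat before reacting.)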
+
+ puts "\tRepmgr$tnum.f: Run second set of transactions at master."
+ eval rep_test $method $masterenv NULL $niter $niter 0 0 $largs
+
+ puts "\tRepmgr$tnum.g: Verifying client database contents."
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+
+	# Make sure the client reacts to the lost master connection by holding
+	# an election. To do so, first record the initial stat values, then
+	# make sure they increase.
+ #
+ set init_eh [stat_field $clientenv rep_stat "Elections held"]
+ set init_cd [stat_field $clientenv repmgr_stat "Connections dropped"]
+
+ # Make sure client notices the lack of heartbeat. Since the client's
+ # heartbeat monitoring granularity is 10 seconds, if we wait up to 15
+ # seconds that ought to give it plenty of time to notice and react.
+ #
+ puts "\tRepmgr$tnum.h: Remove master heartbeat and wait."
+ $masterenv repmgr -timeout {heartbeat_send 0}
+ set max_wait 15
+ await_condition {[stat_field $clientenv rep_stat \
+ "Elections held"] > $init_eh} $max_wait
+ error_check_good conndrop [expr \
+ [stat_field $clientenv repmgr_stat "Connections dropped"] \
+ > $init_cd] 1
+
+ error_check_good client_close [$clientenv close] 0
+ error_check_good master_close [$masterenv close] 0
+}
diff --git a/db-4.8.30/test/repmgr013.tcl b/db-4.8.30/test/repmgr013.tcl
new file mode 100644
index 0000000..64f4266
--- /dev/null
+++ b/db-4.8.30/test/repmgr013.tcl
@@ -0,0 +1,129 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr013
+# TEST Site list test.
+# TEST
+# TEST Configure a master and two clients where one client is a peer of
+# TEST the other and verify resulting site lists.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr013 { method { niter 100 } { tnum "013" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): repmgr site list test."
+ repmgr013_sub $method $niter $tnum $args
+}
+
+proc repmgr013_sub { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ set nsites 3
+
+ set small_iter [expr $niter / 10]
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ # Use different connection retry timeout values to handle any
+ # collisions from starting sites at the same time by retrying
+ # at different times.
+
+ puts "\tRepmgr$tnum.a: Start a master."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -start master
+
+ puts "\tRepmgr$tnum.b: Start first client."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 2]] \
+ -start client
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.c: Start second client as peer of first."
+ set cl2_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT2 -home $clientdir2 -txn -rep -thread"
+ set clientenv2 [eval $cl2_envcmd]
+ $clientenv2 repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 5000000} \
+ -local [list localhost [lindex $ports 2]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 1] peer] \
+ -start client
+ await_startup_done $clientenv2
+
+ puts "\tRepmgr$tnum.d: Get repmgr site lists and verify."
+ verify_sitelist [$masterenv repmgr_site_list]
+ verify_sitelist [$clientenv repmgr_site_list]
+ verify_sitelist [$clientenv2 repmgr_site_list]
+
+ error_check_good client2_close [$clientenv2 close] 0
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
+
+proc verify_sitelist { sitelist } {
+ # Make sure there are 2 other sites.
+ error_check_good lenchk [llength $sitelist] 2
+
+	# Make sure eid and port are integers, host is the expected
+	# string value, and status is not "unknown".
+ foreach tuple $sitelist {
+ error_check_good eidchk [string is integer -strict \
+ [lindex $tuple 0]] 1
+ error_check_good hostchk [lindex $tuple 1] "localhost"
+		error_check_good portchk [string is integer -strict \
+ [lindex $tuple 2]] 1
+ error_check_bad statchk [lindex $tuple 3] unknown
+ }
+}
diff --git a/db-4.8.30/test/repmgr014.tcl b/db-4.8.30/test/repmgr014.tcl
new file mode 100644
index 0000000..7b999d9
--- /dev/null
+++ b/db-4.8.30/test/repmgr014.tcl
@@ -0,0 +1,44 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr014
+# TEST Basic repmgr in-memory test.
+# TEST
+# TEST Create an appointed master and two clients, process some records and
+# TEST verify resulting databases. Put databases, logs and replication files
+# TEST in-memory.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr014 { method { niter 100 } { tnum "014" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): Basic repmgr in-memory test."
+ basic_repmgr_test $method $niter $tnum 1 1 0 0 1 $args
+}
+
diff --git a/db-4.8.30/test/repmgr015.tcl b/db-4.8.30/test/repmgr015.tcl
new file mode 100644
index 0000000..ee3e388
--- /dev/null
+++ b/db-4.8.30/test/repmgr015.tcl
@@ -0,0 +1,46 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr015
+# TEST Basic repmgr in-memory election test.
+# TEST
+# TEST Open three clients of different priorities and make sure repmgr
+# TEST elects expected master. Shut master down, make sure repmgr elects
+# TEST expected remaining client master, make sure former master can join
+# TEST as client. Replication files are in-memory; databases, logs and
+# TEST environment regions are on-disk.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr015 { method { niter 100 } { tnum "015" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts -nonewline "Repmgr$tnum ($method): Basic repmgr election test "
+ puts "with rep files in-memory."
+ basic_repmgr_election_test $method $niter $tnum 1 $args
+}
diff --git a/db-4.8.30/test/repmgr016.tcl b/db-4.8.30/test/repmgr016.tcl
new file mode 100644
index 0000000..8121f4f
--- /dev/null
+++ b/db-4.8.30/test/repmgr016.tcl
@@ -0,0 +1,45 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr016
+# TEST Basic repmgr in-memory internal init test.
+# TEST
+# TEST Start an appointed master site and two clients, processing
+# TEST transactions between each additional site. Verify all expected
+# TEST transactions are replicated. Replication files are in-memory;
+# TEST databases, logs and environment regions are on-disk.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr016 { method { niter 100 } { tnum "016" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts -nonewline "Repmgr$tnum ($method): Basic repmgr internal init "
+ puts "test with rep files in-memory."
+ basic_repmgr_init_test $method $niter $tnum 1 $args
+}
diff --git a/db-4.8.30/test/repmgr017.tcl b/db-4.8.30/test/repmgr017.tcl
new file mode 100644
index 0000000..5ef9ef8
--- /dev/null
+++ b/db-4.8.30/test/repmgr017.tcl
@@ -0,0 +1,169 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr017
+# TEST repmgr in-memory cache overflow test.
+# TEST
+# TEST Start an appointed master site and one client, putting databases,
+# TEST environment regions, logs and replication files in-memory. Set
+# TEST very small cachesize and run enough transactions to overflow cache.
+# TEST Shut down and restart master and client, giving master a larger cache.
+# TEST Run and verify a small number of transactions.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr017 { method { niter 1000 } { tnum "017" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts \
+ "Repmgr$tnum ($method): repmgr in-memory cache overflow test."
+ repmgr017_sub $method $niter $tnum $args
+}
+
+proc repmgr017_sub { method niter tnum largs } {
+ global rep_verbose
+ global verbose_type
+ global databases_in_memory
+
+ # Force databases in-memory for this test but preserve original
+ # value to restore later so that other tests aren't affected.
+ set restore_dbinmem $databases_in_memory
+ set databases_in_memory 1
+
+ # No need for test directories because test is entirely in-memory.
+
+ set nsites 2
+ set ports [available_ports $nsites]
+ set omethod [convert_method $method]
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ # In-memory logs cannot be used with -txn nosync.
+ set logargs [adjust_logargs "in-memory"]
+ set txnargs [adjust_txnargs "in-memory"]
+
+ # Use different connection retry timeout values to handle any
+ # collisions from starting sites at the same time by retrying
+ # at different times.
+
+ # Open a master with a very small cache.
+ puts "\tRepmgr$tnum.a: Start a master with a very small cache."
+ set cacheargs "-cachesize {0 32768 1}"
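+	# (-cachesize is {gbytes bytes ncache}; a single 32KB region is
+	# small enough for the test's transactions to overflow it.)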
+ set ma_envcmd "berkdb_env_noerr -create $logargs $txnargs $verbargs \
+ -errpfx MASTER -rep -thread -rep_inmem_files -private $cacheargs"
+ set masterenv [eval $ma_envcmd]
+ $masterenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -start master
+
+ # Open a client
+ puts "\tRepmgr$tnum.b: Start a client."
+ set cl_envcmd "berkdb_env_noerr -create $logargs $txnargs $verbargs \
+ -errpfx CLIENT -rep -thread -rep_inmem_files -private"
+ set clientenv [eval $cl_envcmd]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -start client
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.c: Run master transactions and verify full cache."
+ set dbname { "" "test.db" }
+ set mdb [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $masterenv $largs $dbname"]
+ set stat [catch {
+ rep_test $method $masterenv $mdb $niter 0 0 0 $largs } ret ]
+ error_check_good broke $stat 1
+ error_check_good nomem \
+ [is_substr $ret "not enough memory"] 1
+
+ puts "\tRepmgr$tnum.d: Close master and client."
+ error_check_good mdb_close [$mdb close] 0
+ error_check_good client_close [$clientenv close] 0
+ # Master close should return invalid argument.
+ catch { $masterenv close } ret2
+ error_check_good needrec [is_substr $ret2 "invalid argument"] 1
+
+ puts "\tRepmgr$tnum.e: Restart master (with larger cache) and client."
+ # Recovery is a no-op with everything in-memory, but specify it
+ # anyway after closing the master environment with an error.
+ set cacheargs ""
+ set masterenv [eval $ma_envcmd -recover]
+ $masterenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -start master
+
+ # Open -recover to clear env region, including startup_done value.
+ set clientenv [eval $cl_envcmd -recover]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -start client
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.f: Perform small number of transactions on master."
+ set numtxns 10
+ set mdb [eval "berkdb_open_noerr -create $omethod -auto_commit \
+ -env $masterenv $largs $dbname"]
+ set t [$masterenv txn]
+ for { set i 1 } { $i <= $numtxns } { incr i } {
+ error_check_good db_put \
+ [eval $mdb put -txn $t $i [chop_data $method data$i]] 0
+ }
+ error_check_good txn_commit [$t commit] 0
+
+ puts "\tRepmgr$tnum.g: Verify transactions on client."
+ set cdb [eval "berkdb_open_noerr -create -mode 0644 $omethod \
+ -env $clientenv $largs $dbname"]
+ error_check_good reptest_db [is_valid_db $cdb] TRUE
+ for { set i 1 } { $i <= $numtxns } { incr i } {
+ set ret [lindex [$cdb get $i] 0]
+ error_check_good cdb_get $ret [list $i \
+ [pad_data $method data$i]]
+ }
+
+ # If the test had erroneously created replication files, they would
+ # be in the current working directory. Verify that this didn't happen.
+ puts "\tRepmgr$tnum.h: Verify no replication files on disk."
+ no_rep_files_on_disk "."
+
+ # Restore original databases_in_memory value.
+ set databases_in_memory $restore_dbinmem
+
+ error_check_good cdb_close [$cdb close] 0
+ error_check_good mdb_close [$mdb close] 0
+ error_check_good client_close [$clientenv close] 0
+ error_check_good master_close [$masterenv close] 0
+}
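+
+# The steps above wait on await_startup_done from the shared replication
+# utilities (reputils.tcl).  A minimal sketch of such a wait -- a
+# hypothetical stand-in, not the real implementation -- assuming the
+# "Startup complete" rep_stat field and the await_condition helper used
+# elsewhere in this test suite:
+proc repmgr_await_startup_sketch { env { limit 20 } } {
+	# Poll the replication stats until the client reports that its
+	# startup (internal init / sync) has completed.
+	await_condition \
+	    {[stat_field $env rep_stat "Startup complete"] == 1} $limit
+}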
diff --git a/db-4.8.30/test/repmgr018.tcl b/db-4.8.30/test/repmgr018.tcl
new file mode 100644
index 0000000..a5b2c67
--- /dev/null
+++ b/db-4.8.30/test/repmgr018.tcl
@@ -0,0 +1,153 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr018
+# TEST Check repmgr stats.
+# TEST
+# TEST Start an appointed master and one client. Shut down the client,
+# TEST run some transactions at the master and verify that there are
+# TEST acknowledgement failures and one dropped connection. Restart
+# TEST and shut down the client again and verify that there are two
+# TEST dropped connections.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr018 { method { niter 20 } { tnum "018" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts "Repmgr$tnum ($method): Test of repmgr stats."
+ repmgr018_sub $method $niter $tnum $args
+}
+
+proc repmgr018_sub { method niter tnum largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ set nsites 2
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ # Use different connection retry timeout values to handle any
+ # collisions from starting sites at the same time by retrying
+ # at different times.
+
+ # Open a master.
+ puts "\tRepmgr$tnum.a: Start a master."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs -errpfx MASTER \
+ -home $masterdir -txn -rep -thread"
+ set masterenv [eval $ma_envcmd]
+ $masterenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -start master
+
+ # Open a client
+ puts "\tRepmgr$tnum.b: Start a client."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs -errpfx CLIENT \
+ -home $clientdir -txn -rep -thread"
+ set clientenv [eval $cl_envcmd]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -start client
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.c: Run some transactions at master."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+
+ error_check_good perm_no_failed_stat \
+ [stat_field $masterenv repmgr_stat "Acknowledgement failures"] 0
+
+ error_check_good no_connections_dropped \
+ [stat_field $masterenv repmgr_stat "Connections dropped"] 0
+
+ $clientenv close
+
+ # Just do a few transactions (i.e., 3 of them), because each one is
+ # expected to time out, and if we did many the test would take a long
+ # time (with no benefit).
+ #
+ puts "\tRepmgr$tnum.d: Run transactions with no client."
+ eval rep_test $method $masterenv NULL 3 $niter $niter 0 $largs
+
+ error_check_bad perm_failed_stat \
+ [stat_field $masterenv repmgr_stat "Acknowledgement failures"] 0
+
+ # Wait up to 20 seconds when testing for dropped connections. This
+ # corresponds to the master connection_retry timeout.
+ set max_wait 20
+ await_condition {[stat_field $masterenv repmgr_stat \
+ "Connections dropped"] == 1} $max_wait
+
+ # Bring the client back up, and down, a couple times, to test resetting
+ # of stats.
+ #
+ puts "\tRepmgr$tnum.e: Shut down client (pause), check dropped connection."
+ # Open -recover to clear env region, including startup_done value.
+ set clientenv [eval $cl_envcmd -recover]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -start client
+ await_startup_done $clientenv
+ $clientenv close
+
+ await_condition {[stat_field $masterenv repmgr_stat \
+ "Connections dropped"] == 2} $max_wait
+ $masterenv repmgr_stat -clear
+
+ puts "\tRepmgr$tnum.f: Shut down, pause, check dropped connection (reset)."
+ # Open -recover to clear env region, including startup_done value.
+ set clientenv [eval $cl_envcmd -recover]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -start client
+ await_startup_done $clientenv
+ $clientenv close
+
+ await_condition {[stat_field $masterenv repmgr_stat \
+ "Connections dropped"] == 1} $max_wait
+
+ error_check_good masterenv_close [$masterenv close] 0
+}
diff --git a/db-4.8.30/test/repmgr019.tcl b/db-4.8.30/test/repmgr019.tcl
new file mode 100644
index 0000000..177ec67
--- /dev/null
+++ b/db-4.8.30/test/repmgr019.tcl
@@ -0,0 +1,44 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2007-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST repmgr019
+# TEST Basic repmgr test with in-memory databases and in-memory logs.
+# TEST
+# TEST Create an appointed master and two clients, process some records and
+# TEST verify resulting databases.
+# TEST
+# TEST Run for btree only because access method shouldn't matter.
+# TEST
+proc repmgr019 { method { niter 100 } { tnum "019" } args } {
+
+ source ./include.tcl
+
+ if { $is_freebsd_test == 1 } {
+ puts "Skipping replication manager test on FreeBSD platform."
+ return
+ }
+
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win9x platform."
+ return
+ }
+
+ # Skip for all methods except btree.
+ if { $checking_valid_methods } {
+ return btree
+ }
+ if { [is_btree $method] == 0 } {
+ puts "Repmgr$tnum: skipping for non-btree method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+
+ puts \
+ "Repmgr$tnum ($method): Basic repmgr test in-memory databases and logs."
+ basic_repmgr_test $method $niter $tnum 1 1 0 0 0 $args
+}
+
diff --git a/db-4.8.30/test/repmgr022.tcl b/db-4.8.30/test/repmgr022.tcl
new file mode 100644
index 0000000..7893a8a
--- /dev/null
+++ b/db-4.8.30/test/repmgr022.tcl
@@ -0,0 +1,105 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+
+# TEST repmgr022
+# TEST Basic test of repmgr's multi-process master support.
+# TEST
+# TEST Set up a simple 2-site group, create data and replicate it.
+# TEST Add a second process at the master and have it write some
+# TEST updates. It does not explicitly start repmgr (nor does it do
+# TEST any replication configuration, for that matter). Its first
+# TEST update triggers the initiation of connections, so it doesn't
+# TEST reach the client without a log request. But later updates
+# TEST should go directly.
+
+proc repmgr022 { } {
+ source ./include.tcl
+ global rep_verbose
+ global verbose_type
+
+ set tnum "022"
+ puts "Repmgr$tnum: Basic repmgr multi-process master support."
+ set site_prog [setup_site_prog]
+
+ env_cleanup $testdir
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set ports [available_ports 2]
+ set master_port [lindex $ports 0]
+ set client_port [lindex $ports 1]
+
+ puts "\tRepmgr$tnum.a: Set up the master (on TCP port $master_port)."
+ set master [open "| $site_prog" "r+"]
+ fconfigure $master -buffering line
+ puts $master "home $masterdir"
+ puts $master "local $master_port"
+ make_dbconfig $masterdir {{rep_set_nsites 3}}
+ puts $master "output $testdir/m1output"
+ puts $master "open_env"
+ puts $master "start master"
+ error_check_match start_master [gets $master] "*Successful*"
+ puts $master "open_db test.db"
+ puts $master "put myKey myValue"
+
+ puts "\tRepmgr$tnum.b: Set up the client (on TCP port $client_port)."
+ set client [open "| $site_prog" "r+"]
+ fconfigure $client -buffering line
+ puts $client "home $clientdir"
+ puts $client "local $client_port"
+ make_dbconfig $clientdir {{rep_set_nsites 3}}
+ puts $client "output $testdir/coutput"
+ puts $client "open_env"
+ puts $client "remote localhost $master_port"
+ puts $client "start client"
+ error_check_match start_client [gets $client] "*Successful*"
+
+ puts "\tRepmgr$tnum.c: Wait for STARTUPDONE."
+ set clientenv [berkdb_env -home $clientdir]
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.d: Start a second process at master."
+ set m2 [open "| $site_prog" "r+"]
+ fconfigure $m2 -buffering line
+ puts $m2 "home $masterdir"
+ puts $m2 "output $testdir/m2output"
+ puts $m2 "open_env"
+ puts $m2 "open_db test.db"
+ puts $m2 "put sub1 abc"
+ puts $m2 "echo firstputted"
+ set sentinel [gets $m2]
+ error_check_good m2_firstputted $sentinel "firstputted"
+ puts $m2 "put sub2 xyz"
+ puts $m2 "put sub3 ijk"
+ puts $m2 "put sub4 pqr"
+ puts $m2 "echo putted"
+ set sentinel [gets $m2]
+ error_check_good m2_putted $sentinel "putted"
+ puts $master "put another record"
+ puts $master "put and again"
+ puts $master "echo m1putted"
+ set sentinel [gets $master]
+ error_check_good m1_putted $sentinel "m1putted"
+
+ puts "\tRepmgr$tnum.e: Check that replicated data is visible at client."
+ puts $client "open_db test.db"
+ set expected {{myKey myValue} {sub1 abc} {sub2 xyz} {another record}}
+ verify_client_data $clientenv test.db $expected
+
+ # make sure there weren't too many rerequests
+ puts "\tRepmgr$tnum.f: Check rerequest stats"
+ set pfs [stat_field $clientenv rep_stat "Log records requested"]
+ error_check_good rerequest_count [expr $pfs <= 1] 1
+
+ puts "\tRepmgr$tnum.g: Clean up."
+ $clientenv close
+ close $client
+ close $master
+ close $m2
+}
diff --git a/db-4.8.30/test/repmgr023.tcl b/db-4.8.30/test/repmgr023.tcl
new file mode 100644
index 0000000..c47aa86
--- /dev/null
+++ b/db-4.8.30/test/repmgr023.tcl
@@ -0,0 +1,109 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# TEST repmgr023
+# TEST Repmgr support for multi-process master.
+# TEST
+# TEST Start two processes at the master.
+# TEST Add a client site (not previously known to the master processes),
+# TEST and make sure both master processes connect to it.
+
+proc repmgr023 { } {
+ source ./include.tcl
+
+ set tnum "023"
+ puts "Repmgr$tnum: Two master processes both connect to a client."
+ set site_prog [setup_site_prog]
+
+ env_cleanup $testdir
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ set ports [available_ports 2]
+ set master_port [lindex $ports 0]
+ set client_port [lindex $ports 1]
+
+ puts "\tRepmgr$tnum.a: Set up the master (on TCP port $master_port)."
+ set master [open "| $site_prog" "r+"]
+ fconfigure $master -buffering line
+ puts $master "home $masterdir"
+ puts $master "local $master_port"
+ make_dbconfig $masterdir {{rep_set_nsites 3}}
+ puts $master "output $testdir/m1output"
+ puts $master "open_env"
+ puts $master "start master"
+ set ignored [gets $master]
+ puts $master "open_db test.db"
+ puts $master "put myKey myValue"
+
+ # sync.
+ puts $master "echo setup"
+ set sentinel [gets $master]
+ error_check_good echo_setup $sentinel "setup"
+
+ puts "\tRepmgr$tnum.b: Start a second process at master."
+ set m2 [open "| $site_prog" "r+"]
+ fconfigure $m2 -buffering line
+ puts $m2 "home $masterdir"
+ puts $m2 "output $testdir/m2output"
+ puts $m2 "open_env"
+ puts $m2 "open_db test.db"
+ puts $m2 "put sub1 abc"
+ puts $m2 "echo firstputted"
+ set sentinel [gets $m2]
+ error_check_good m2_firstputted $sentinel "firstputted"
+
+ puts "\tRepmgr$tnum.c: Set up the client (on TCP port $client_port)."
+ set client [open "| $site_prog" "r+"]
+ fconfigure $client -buffering line
+ puts $client "home $clientdir"
+ puts $client "local $client_port"
+ make_dbconfig $clientdir {{rep_set_nsites 3}}
+ puts $client "output $testdir/coutput"
+ puts $client "open_env"
+ puts $client "remote localhost $master_port"
+ puts $client "start client"
+ error_check_match start_client [gets $client] "*Successful*"
+
+ puts "\tRepmgr$tnum.d: Wait for STARTUPDONE."
+ set clientenv [berkdb_env -home $clientdir]
+ await_startup_done $clientenv
+
+ # Initially there should be no rerequests.
+ set pfs1 [stat_field $clientenv rep_stat "Log records requested"]
+ error_check_good rerequest_count $pfs1 0
+
+ puts $m2 "put sub2 xyz"
+ puts $m2 "put sub3 ijk"
+ puts $m2 "put sub4 pqr"
+ puts $m2 "echo putted"
+ set sentinel [gets $m2]
+ error_check_good m2_putted $sentinel "putted"
+ puts $master "put another record"
+ puts $master "put and again"
+ puts $master "echo m1putted"
+ set sentinel [gets $master]
+ error_check_good m1_putted $sentinel "m1putted"
+
+ puts "\tRepmgr$tnum.e: Check that replicated data is visible at client."
+ puts $client "open_db test.db"
+ set expected {{myKey myValue} {sub1 abc} {sub2 xyz} {another record}}
+ verify_client_data $clientenv test.db $expected
+
+ # make sure there weren't too many rerequests
+ puts "\tRepmgr$tnum.f: Check rerequest stats"
+ set pfs [stat_field $clientenv rep_stat "Log records requested"]
+ error_check_good rerequest_count [expr $pfs <= 1] 1
+
+ puts "\tRepmgr$tnum.g: Clean up."
+ $clientenv close
+ close $client
+ close $master
+ close $m2
+}
diff --git a/db-4.8.30/test/repmgr024.tcl b/db-4.8.30/test/repmgr024.tcl
new file mode 100644
index 0000000..d809480
--- /dev/null
+++ b/db-4.8.30/test/repmgr024.tcl
@@ -0,0 +1,140 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# TEST repmgr024
+# TEST Ensuring exactly one listener process.
+# TEST
+# TEST Start a repmgr process with a listener.
+# TEST Start a second process, and see that it does not become the listener.
+# TEST Shut down the first process (gracefully). Now a second process should
+# TEST become listener.
+# TEST Kill the listener process abruptly. Running failchk should show that
+# TEST recovery is necessary. Run recovery and start a clean listener.
+
+proc repmgr024 { } {
+ source ./include.tcl
+ source $test_path/testutils.tcl
+
+ set tnum "024"
+ puts "Repmgr$tnum: Ensuring exactly one listener process."
+ set site_prog [setup_site_prog]
+
+ env_cleanup $testdir
+
+ set masterdir $testdir/MASTERDIR
+
+ file mkdir $masterdir
+
+ set ports [available_ports 1]
+ set master_port [lindex $ports 0]
+
+ make_dbconfig $masterdir {{rep_set_nsites 3}}
+ set masterenv [berkdb_env -rep -txn -thread -home $masterdir \
+ -isalive my_isalive -create]
+ $masterenv close
+
+ puts "\tRepmgr$tnum.a: Set up the master (on TCP port $master_port)."
+ set master [open "| $site_prog" "r+"]
+ fconfigure $master -buffering line
+ puts $master "home $masterdir"
+ puts $master "local $master_port"
+ puts $master "output $testdir/m1output"
+ puts $master "open_env"
+ puts $master "start master"
+ error_check_match ok1 [gets $master] "*Successful*"
+
+ # sync.
+ puts $master "echo setup"
+ set sentinel [gets $master]
+ error_check_good echo_setup $sentinel "setup"
+
+ puts "\tRepmgr$tnum.b: Start a second process at master."
+ set m2 [open "| $site_prog" "r+"]
+ fconfigure $m2 -buffering line
+ puts $m2 "home $masterdir"
+ puts $m2 "local $master_port"
+ puts $m2 "output $testdir/m2output"
+ puts $m2 "open_env"
+ puts $m2 "start master"
+ set ret [gets $m2]
+ error_check_match ignored "$ret" "*DB_REP_IGNORE*"
+
+ puts $m2 "echo started"
+ set sentinel [gets $m2]
+ error_check_good started $sentinel "started"
+
+ close $m2
+ close $master
+
+	# Hmm, actually it'd probably be better to send them an "exit" command,
+	# and then read until we get an EOF error. That way we're sure they've
+	# had a chance to finish the close operation. This is a recurring
+	# theme, doing stuff synchronously. There should be a way to wrap this
+	# up to make it the default behavior (see the sketch after this proc).
+
+ puts "\tRepmgr$tnum.c: Restart 2nd process, to act as listener this time"
+ set m2 [open "| $site_prog" "r+"]
+ fconfigure $m2 -buffering line
+ puts $m2 "home $masterdir"
+ puts $m2 "local $master_port"
+ puts $m2 "output $testdir/m2output2"
+ puts $m2 "open_env"
+ puts $m2 "start master"
+ set answer [gets $m2]
+ error_check_match ok2 "$answer" "*Successful*"
+
+ puts "\tRepmgr$tnum.d: Clean up."
+ close $m2
+
+ puts "\tRepmgr$tnum.e: Start main process."
+ set master [open "| $site_prog" "r+"]
+ fconfigure $master -buffering line
+ puts $master "home $masterdir"
+ puts $master "local $master_port"
+ puts $master "output $testdir/m1output3"
+ puts $master "open_env"
+ puts $master "start master"
+ set answer [gets $master]
+ error_check_match ok3 $answer "*Successful*"
+
+ # This seems to require $KILL; tclkill does not work.
+ puts "\tRepmgr$tnum.f: Kill process [pid $master] without clean-up."
+ exec $KILL [pid $master]
+ catch {close $master}
+
+ # In realistic, correct operation, the application should have called
+ # failchk before trying to restart a new process. But let's just prove
+ # to ourselves that it's actually doing something. This first try
+ # should fail.
+ #
+ puts "\tRepmgr$tnum.g: Start take-over process without failchk."
+ set m2 [open "| $site_prog" "r+"]
+ fconfigure $m2 -buffering line
+ puts $m2 "home $masterdir"
+ puts $m2 "local $master_port"
+ puts $m2 "output $testdir/m2output3"
+ puts $m2 "open_env"
+ puts $m2 "start master"
+ set answer [gets $m2]
+ error_check_match ignored3 $answer "*DB_REP_IGNORE*"
+ close $m2
+
+ set masterenv [berkdb_env -thread -home $masterdir -isalive my_isalive]
+ $masterenv failchk
+
+ # This time it should work.
+ puts "\tRepmgr$tnum.h: Start take-over process after failchk."
+ set m2 [open "| $site_prog" "r+"]
+ fconfigure $m2 -buffering line
+ puts $m2 "home $masterdir"
+ puts $m2 "local $master_port"
+ puts $m2 "output $testdir/m2output4"
+ puts $m2 "open_env"
+ puts $m2 "start master"
+ set answer [gets $m2]
+ error_check_match ok4 $answer "*Successful*"
+
+ close $m2
+ $masterenv close
+}
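+
+# A minimal sketch of the synchronous-close idea noted above -- a
+# hypothetical helper, not used by this test.  It assumes the site
+# program understands the "exit" command (as repmgr032 relies on):
+# ask the child to exit, drain its output until EOF so we know the
+# shutdown finished, then close the pipe.
+proc repmgr024_close_sync { prog } {
+	puts $prog "exit"
+	# Read until EOF so the child has a chance to finish closing.
+	while { [gets $prog line] >= 0 } {}
+	error_check_good site_eof [eof $prog] 1
+	close $prog
+}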
diff --git a/db-4.8.30/test/repmgr025.tcl b/db-4.8.30/test/repmgr025.tcl
new file mode 100644
index 0000000..f5c025a
--- /dev/null
+++ b/db-4.8.30/test/repmgr025.tcl
@@ -0,0 +1,173 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+
+# TEST repmgr025
+# TEST Repmgr site address discovery via handshakes.
+# TEST
+# TEST Master with 2 processes, does not know client's address.
+# TEST Client processes start in either order, connect to master.
+# TEST Master learns of client's address via handshake message.
+
+proc repmgr025 { } {
+ repmgr025_sub yes
+ repmgr025_sub no
+}
+
+# We can either have the master's subordinate process discover the client
+# address "on the fly" (i.e., in the course of doing a transaction), or more
+# delicately by not starting it until we know that the address is in the
+# shared region ready to be discovered. In the delicate case, we expect no perm
+# failures, but otherwise there's almost always one.
+#
+proc repmgr025_sub { on_the_fly } {
+ source ./include.tcl
+
+ set tnum "025"
+ if { $on_the_fly } {
+ set msg "on the fly"
+ } else {
+ set msg "regular"
+ }
+ puts "Repmgr$tnum: Site address discovery via handshakes ($msg)"
+ set site_prog [setup_site_prog]
+
+ env_cleanup $testdir
+
+ foreach {master_port client_port} [available_ports 2] {
+ puts "\tRepmgr$tnum.a:\
+ Using ports $master_port, $client_port"
+ }
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ puts "\tRepmgr$tnum.b: Start 2 processes at master."
+ make_dbconfig $masterdir {{rep_set_nsites 3}}
+ set m1 [open "| $site_prog" "r+"]
+ fconfigure $m1 -buffering line
+ puts $m1 "home $masterdir"
+ puts $m1 "local $master_port"
+ puts $m1 "output $testdir/m1output"
+ puts $m1 "open_env"
+ puts $m1 "start master"
+ gets $m1
+
+ if {$on_the_fly} {
+ set m2 [open "| $site_prog 2>erroutp" "r+"]
+ fconfigure $m2 -buffering line
+ puts $m2 "home $masterdir"
+ puts $m2 "local $master_port"
+ puts $m2 "output $testdir/m2output"
+ puts $m2 "open_env"
+ puts $m2 "start master"
+ gets $m2
+ }
+
+ puts "\tRepmgr$tnum.c: Start 1st client process, but not connected."
+ make_dbconfig $clientdir {{rep_set_nsites 3}}
+ set c1 [open "| $site_prog 2> c1stderr" "r+"]
+ fconfigure $c1 -buffering line
+ puts $c1 "home $clientdir"
+ puts $c1 "local $client_port"
+ puts $c1 "output $testdir/c1output"
+ puts $c1 "open_env"
+ puts $c1 "start client"
+ gets $c1
+
+ # Obviously, at this point the two sites aren't connected, since neither
+ # one has been told about the other.
+
+ puts "\tRepmgr$tnum.d: Start 2nd client process, connecting to master."
+ set c2 [open "| $site_prog" "r+"]
+ fconfigure $c2 -buffering line
+ puts $c2 "home $clientdir"
+ puts $c2 "local $client_port"
+ puts $c2 "remote localhost $master_port"
+ puts $c2 "output $testdir/c2output"
+ puts $c2 "open_env"
+ puts $c2 "start client"
+ gets $c2
+
+ # At this point, c2 will connect to the master. (The truly observant
+ # reader will notice that there's essentially no point to this
+ # connection, at least as long as this site remains a client. But if it
+ # ever becomes master, this connection will be ready to go, for sending
+ # log records from live updates originating at that process.)
+ #
+ # The master should extract the site address from the handshake,
+ # recognize that this is a subordinate connection, and therefore
+ # initiate an outgoing connection to the client. It also of course
+ # stashes the site's address in the shared region, so that any
+ # subordinate processes (m2) can find it.
+ #
+ # Check site list, to make sure A discovers B's network address. Then
+ # wait for startup-done.
+ #
+ set cond {
+ set masterenv [berkdb_env -home $masterdir]
+ set msl [$masterenv repmgr_site_list]
+ $masterenv close
+ expr {[llength $msl] == 1 && [lindex $msl 0 2] == $client_port}
+ }
+ await_condition {[eval $cond]} 20
+
+ set clientenv [berkdb_env -home $clientdir]
+ await_startup_done $clientenv
+
+ if {!$on_the_fly} {
+ set m2 [open "| $site_prog 2>erroutp" "r+"]
+ fconfigure $m2 -buffering line
+ puts $m2 "home $masterdir"
+ puts $m2 "local $master_port"
+ puts $m2 "output $testdir/m2output"
+ puts $m2 "open_env"
+ puts $m2 "start master"
+ gets $m2
+ tclsleep 2
+ }
+
+ puts $m2 "open_db test.db"
+ puts $m2 "put k1 v1"
+ puts $m2 "echo done"
+ gets $m2
+
+ # make sure there weren't too many perm failures
+ puts "\tRepmgr$tnum.e: Check stats"
+ set get_pfs { expr [stat_field \
+ $clientenv repmgr_stat "Acknowledgement failures"] }
+ set pfs [eval $get_pfs]
+
+ if {$on_the_fly} {
+ set max 1
+ } else {
+ set max 0
+ }
+ if {$pfs > $max} {
+ error "FAIL: too many perm failures"
+ }
+
+ puts $m1 "open_db test.db"
+ puts $m1 "put k2 v2"
+ puts $m1 "echo done"
+ gets $m1
+
+ puts "\tRepmgr$tnum.f: Check that replicated data is visible at client."
+ set expected {{k1 v1} {k2 v2}}
+ verify_client_data $clientenv test.db $expected
+
+ # make sure there were no additional perm failures
+ puts "\tRepmgr$tnum.g: Check stats again"
+ set pfs2 [eval $get_pfs]
+ error_check_good subsequent $pfs2 $pfs
+
+ puts "\tRepmgr$tnum.h: Clean up."
+ $clientenv close
+ close $c1
+ close $c2
+ close $m1
+ close $m2
+}
diff --git a/db-4.8.30/test/repmgr026.tcl b/db-4.8.30/test/repmgr026.tcl
new file mode 100644
index 0000000..cb2b202
--- /dev/null
+++ b/db-4.8.30/test/repmgr026.tcl
@@ -0,0 +1,179 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# TEST repmgr026
+# TEST Repmgr site address discovery via NEWSITE
+# TEST
+# TEST New client, previously unknown to (main) master process, connects to an
+# TEST existing client, which broadcasts NEWSITE message.
+# TEST This causes master to discover its address, and connect to it.
+# TEST Other new clients may also have been added to master's configuration in
+# TEST the interim (via a subordinate master process).
+
+proc repmgr026 { } {
+ foreach sitelist { {} {2} {3 2} {3 2 4} {3} } {
+ repmgr026_sub $sitelist
+ }
+}
+
+proc repmgr026_sub { extras } {
+ source ./include.tcl
+
+ set tnum "026"
+ puts "Repmgr$tnum:\
+ Repmgr and NEWSITE, with these extra clients: {$extras}"
+ set site_prog [setup_site_prog]
+
+ env_cleanup $testdir
+
+ set ports [available_ports 5]
+ set master_port [lindex $ports 0]
+ set portB [lindex $ports 1]
+ set portC [lindex $ports 2]
+
+ set i 0
+ foreach site_id "A B C D E" {
+ set d "$testdir/$site_id"
+ set dirs($i) $d
+ file mkdir $d
+ incr i
+ }
+ set masterdir $dirs(0)
+ set dirB $dirs(1)
+ set dirC $dirs(2)
+
+ puts "\tRepmgr$tnum.a: Start 2 processes at master."
+ make_dbconfig $masterdir {{rep_set_nsites 5}
+ {rep_set_timeout DB_REP_CONNECTION_RETRY 300000000}}
+ set m1 [open "| $site_prog" "r+"]
+ fconfigure $m1 -buffering line
+ puts $m1 "home $masterdir"
+ puts $m1 "local $master_port"
+ puts $m1 "output $testdir/m1output"
+ puts $m1 "open_env"
+ puts $m1 "start master"
+ gets $m1
+
+ set m2 [open "| $site_prog" "r+"]
+ fconfigure $m2 -buffering line
+ puts $m2 "home $masterdir"
+ puts $m2 "output $testdir/m2output"
+ puts $m2 "open_env"
+ puts $m2 "echo done"
+ gets $m2
+
+ puts "\tRepmgr$tnum.b: Start client B, connecting to master."
+ make_dbconfig $dirB {{rep_set_nsites 5}}
+ set b [open "| $site_prog" "r+"]
+ fconfigure $b -buffering line
+ puts $b "home $dirB"
+ puts $b "local $portB"
+ puts $b "remote localhost $master_port"
+ puts $b "output $testdir/boutput"
+ puts $b "open_env"
+ puts $b "start client"
+ gets $b
+
+ set envB [berkdb_env -home $dirB]
+ await_startup_done $envB
+
+ # Add some newly arrived client configurations to the master, but do it
+ # via the subordinate process A2, so that the main process doesn't
+ # notice them until it gets the NEWSITE message. Note that these
+ # clients aren't running yet.
+ #
+ puts "\tRepmgr$tnum.c: Add new client addresses at master."
+ foreach client_index $extras {
+ puts $m2 "remote localhost [lindex $ports $client_index]"
+ if {$client_index == 2} {
+ # Start client C last, below.
+ continue;
+ }
+ make_dbconfig $dirs($client_index) {{rep_set_nsites 5}}
+ set x [open "| $site_prog" "r+"]
+ fconfigure $x -buffering line
+ puts $x "home $dirs($client_index)"
+ puts $x "local [lindex $ports $client_index]"
+ puts $x "output $testdir/c${client_index}output"
+ puts $x "open_env"
+ puts $x "start client"
+ gets $x
+ set sites($client_index) $x
+ }
+ puts $m2 "echo done"
+ gets $m2
+
+ # Start client C, triggering the NEWSITE mechanism at the master.
+ #
+ puts "\tRepmgr$tnum.d: Start client C, connecting first only to B."
+ make_dbconfig $dirC {{rep_set_nsites 5}}
+ set c [open "| $site_prog" "r+"]
+ fconfigure $c -buffering line
+ puts $c "home $dirC"
+ puts $c "local $portC"
+ puts $c "remote localhost $portB"
+ puts $c "output $testdir/coutput"
+ puts $c "open_env"
+ puts $c "start client"
+ gets $c
+
+ # First check for startup-done at site C.
+ #
+ set envC [berkdb_env -home $dirC]
+ await_startup_done $envC 15
+ $envC close
+
+ # Then check for startup-done at any other clients.
+ #
+ foreach client_index $extras {
+ if {$client_index == 2} {
+ continue;
+ }
+ set envx [berkdb_env -home $dirs($client_index)]
+ await_startup_done $envx 20
+ $envx close
+ }
+
+ # If we've gotten this far, we know that everything must have worked
+ # fine, because otherwise the clients wouldn't have been able to
+ # complete their start-up. But let's check the master's site list
+ # anyway, to make sure it knows about site C at the list index location
+ # we think should be correct.
+ #
+ # Site C's address appears at the position indicated in the "extras"
+ # list, or right after that if it didn't appear in extras.
+ #
+ set pos [lsearch -exact $extras 2]
+ if {$pos == -1} {
+ set pos [llength $extras]
+ }
+ incr pos; # make allowance for site B which is
+ # always there
+
+ puts "\tRepmgr$tnum.e: Check address lists."
+ set masterenv [berkdb_env -home $masterdir]
+ error_check_good master_knows_clientC \
+ [lindex [$masterenv repmgr_site_list] $pos 2 ] $portC
+
+ set envC [berkdb_env -home $dirC]
+ error_check_good C_knows_master \
+ [lindex [$envC repmgr_site_list] 1 2] $master_port
+
+ puts "\tRepmgr$tnum.f: Clean up."
+ $envC close
+ $envB close
+ $masterenv close
+
+ close $c
+
+ foreach client_index $extras {
+ if {$client_index == 2} {
+ continue;
+ }
+ close $sites($client_index)
+ }
+ close $b
+ close $m2
+ close $m1
+}
diff --git a/db-4.8.30/test/repmgr027.tcl b/db-4.8.30/test/repmgr027.tcl
new file mode 100644
index 0000000..fe4181f
--- /dev/null
+++ b/db-4.8.30/test/repmgr027.tcl
@@ -0,0 +1,216 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# TEST repmgr027
+# TEST Repmgr recognition of peer setting, across processes.
+# TEST
+# TEST Set up a master and two clients, synchronized with some data.
+# TEST Add a new client, configured to use c2c sync with one of the original
+# TEST clients. Check stats to make sure the correct c2c peer was used.
+
+proc repmgr027 { } {
+ repmgr027_sub position_chg
+ repmgr027_sub chg_site
+ repmgr027_sub chg_after_open
+ repmgr027_sub set_peer_after_open
+}
+
+proc repmgr027_sub { config } {
+ source ./include.tcl
+
+ set tnum "027"
+ puts "Repmgr$tnum: Repmgr peer, with \"$config\" configuration."
+ set site_prog [setup_site_prog]
+
+ env_cleanup $testdir
+
+ set ports [available_ports 4]
+ set mport [lindex $ports 0]
+ set portA [lindex $ports 1]
+ set portB [lindex $ports 2]
+
+ file mkdir [set masterdir $testdir/MASTER]
+ file mkdir $testdir/A
+ file mkdir $testdir/B
+ file mkdir $testdir/C
+
+ puts "\tRepmgr$tnum.a: Start master, write some data."
+ make_dbconfig $masterdir {{rep_set_nsites 4}}
+ set cmds {
+ "home $masterdir"
+ "local $mport"
+ "output $testdir/moutput"
+ "open_env"
+ "start master"
+ "open_db test.db"
+ "put key1 value1"
+ }
+ set m [open_site_prog [subst $cmds]]
+
+ puts "\tRepmgr$tnum.b:\
+ Start initial two clients; wait for them to synchronize."
+ # Allowing both A and B to start at the same time, and synchronize
+ # concurrently would make sense. But it causes very slow performance on
+ # Windows. Since it's really only client C that's under test here, this
+ # detail doesn't matter.
+ #
+ make_dbconfig $testdir/A {{rep_set_nsites 4}}
+ set a [open_site_prog [list \
+ "home $testdir/A" \
+ "local $portA" \
+ "output $testdir/aoutput" \
+ "remote localhost $mport" \
+ "open_env" \
+ "start client"]]
+ set env [berkdb_env -home $testdir/A]
+ await_startup_done $env
+ $env close
+
+ make_dbconfig $testdir/B {{rep_set_nsites 4}}
+ set b [open_site_prog [list \
+ "home $testdir/B" \
+ "local $portB" \
+ "output $testdir/boutput" \
+ "remote localhost $mport" \
+ "open_env" \
+ "start client"]]
+ set env [berkdb_env -home $testdir/B]
+ await_startup_done $env
+ $env close
+
+ # Client C is the one whose behavior is being tested. It has two
+ # processes. "c" will be the main replication process, and "c2" the
+ # subordinate process. The initial configuration commands used to set
+ # up the two processes vary slightly with each test. The variable
+ # $config contains the name of the proc which will fill out the
+ # configuration information appropriately for each test variant.
+ #
+ puts "\tRepmgr$tnum.c: Start client under test."
+ make_dbconfig $testdir/C {{rep_set_nsites 4}}
+
+ set c2 [list \
+ "home $testdir/C" \
+ "local [lindex $ports 3]" \
+ "output $testdir/c2output" \
+ "open_env"]
+ set c [list \
+ "home $testdir/C" \
+ "local [lindex $ports 3]" \
+ "output $testdir/coutput" \
+ "open_env"]
+ set lists [repmgr027_$config $c2 $c]
+ set c2 [lindex $lists 0]
+ set c [lindex $lists 1]
+
+ # Ugly hack: in this one case, the order of opening the two client
+ # processes has to be reversed.
+ #
+ if {$config == "chg_after_open"} {
+ set c [open_site_prog $c]
+ set c2 [open_site_prog $c2]
+ } else {
+ set c2 [open_site_prog $c2]
+ set c [open_site_prog $c]
+ }
+ puts $c "start client"
+ gets $c
+
+ puts "\tRepmgr$tnum.d: Wait for startup-done at test client."
+ set env [berkdb_env -home $testdir/C]
+ await_startup_done $env 27
+ $env close
+
+ puts "\tRepmgr$tnum.e: Check stats to make sure proper peer was used."
+ set env [berkdb_env -home $testdir/A]
+ set reqs [stat_field $env rep_stat "Client service requests"]
+ error_check_good used_client_A [expr {$reqs > 0}] 1
+ $env close
+ set env [berkdb_env -home $testdir/B]
+ set reqs [stat_field $env rep_stat "Client service requests"]
+ error_check_good didnt_use_b [expr {$reqs == 0}] 1
+ $env close
+
+ puts "\tRepmgr$tnum.f: Clean up."
+ close $c2
+ close $c
+ close $b
+ close $a
+ close $m
+}
+
+# Scenario 1: client A is the peer; C2 sets B, A; C sets A. For C, this means
+# no peer change, but its position in the list changes, requiring some tricky
+# shuffling.
+#
+proc repmgr027_position_chg { c2 c } {
+ set remote_config [uplevel 1 {list \
+ "remote localhost $mport" \
+ "remote localhost $portB" \
+ "remote -p localhost $portA"}]
+ set i [lsearch -exact $c2 "open_env"]
+
+ # It should be found, in the middle somewhere, or this will break.
+ set c2 "[lrange $c2 0 [expr $i - 1]] $remote_config [lrange $c2 $i end]"
+
+ set remote_config [uplevel 1 {list \
+ "remote -p localhost $portA" \
+ "remote localhost $mport"}]
+ set i [lsearch -exact $c "open_env"]
+ set c "[lrange $c 0 [expr $i - 1]] $remote_config [lrange $c $i end]"
+
+ return [list $c2 $c]
+}
+
+# C2 first sets the peer as B, but then C comes along and changes it to A.
+#
+proc repmgr027_chg_site { c2 c } {
+ set remote_config [uplevel 1 {list \
+ "remote localhost $mport" \
+ "remote -p localhost $portB"}]
+ set i [lsearch -exact $c2 "open_env"]
+
+ # It should be found, in the middle somewhere, or this will break.
+ set c2 "[lrange $c2 0 [expr $i - 1]] $remote_config [lrange $c2 $i end]"
+
+ set remote_config [uplevel 1 {list \
+ "remote -p localhost $portA" \
+ "remote localhost $mport"}]
+ set i [lsearch -exact $c "open_env"]
+ set c "[lrange $c 0 [expr $i - 1]] $remote_config [lrange $c $i end]"
+
+ return [list $c2 $c]
+}
+
+# C first sets B as its peer, and creates the env. Then C2 comes along and
+# changes it to A. C will have to learn of the change on the fly, rather than
+# at env open/join time. Even though the actual order of process creation will
+# be reversed (by the caller), we still conform to the convention of putting C2
+# first, and then C, in the ordered list.
+#
+proc repmgr027_chg_after_open { c2 c } {
+ set remote_config [uplevel 1 {list \
+ "remote localhost $mport" \
+ "remote -p localhost $portA"}]
+ set i [lsearch -exact $c2 "open_env"]
+
+ # It should be found, in the middle somewhere, or this will break.
+ set c2 "[lrange $c2 0 [expr $i - 1]] $remote_config [lrange $c2 $i end]"
+
+ set remote_config [uplevel 1 {list \
+ "remote -p localhost $portB" \
+ "remote localhost $mport"}]
+ set i [lsearch -exact $c "open_env"]
+ set c "[lrange $c 0 [expr $i - 1]] $remote_config [lrange $c $i end]"
+
+ return [list $c2 $c]
+}
+
+# Nothing especially exotic here, except this exercises a code path where I
+# previously discovered a bug.
+#
+proc repmgr027_set_peer_after_open { c2 c } {
+ set remote_config [uplevel 1 {subst "remote -p localhost $portA"}]
+ lappend c $remote_config
+ return [list $c2 $c]
+}
diff --git a/db-4.8.30/test/repmgr028.tcl b/db-4.8.30/test/repmgr028.tcl
new file mode 100644
index 0000000..c12fd17
--- /dev/null
+++ b/db-4.8.30/test/repmgr028.tcl
@@ -0,0 +1,130 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# TEST repmgr028
+# TEST Simple smoke test for repmgr elections with multi-process envs.
+
+proc repmgr028 { } {
+ source ./include.tcl
+
+ set tnum "028"
+ puts "Repmgr$tnum:\
+ Smoke test for repmgr elections with multi-process envs."
+
+ env_cleanup $testdir
+
+ set ports [available_ports 3]
+
+ set dbconfig {
+ {rep_set_nsites 3}
+ {rep_set_timeout DB_REP_ELECTION_RETRY 2000000}
+ {rep_set_timeout DB_REP_ELECTION_TIMEOUT 1000000}
+ }
+ foreach d {A B C} {
+ file mkdir $testdir/$d
+ make_dbconfig $testdir/$d $dbconfig
+ }
+
+ puts "\tRepmgr$tnum.a: Start 3 sites (with 2 processes each)."
+ set cmds {
+ {home $testdir/A}
+ {local [lindex $ports 0]}
+ {open_env}
+ {start election}
+ }
+ set cmds [subst $cmds]
+ set a1 [open_site_prog [linsert $cmds 1 "output $testdir/a1output"]]
+ set a2 [open_site_prog [linsert $cmds 1 "output $testdir/a2output"]]
+
+ set cmds {
+ {home $testdir/B}
+ {local [lindex $ports 1]}
+ {remote localhost [lindex $ports 0]}
+ {open_env}
+ {start election}
+ }
+ set cmds [subst $cmds]
+ set b1 [open_site_prog [linsert $cmds 1 "output $testdir/b1output"]]
+ set b2 [open_site_prog [linsert $cmds 1 "output $testdir/b2output"]]
+
+ set cmds {
+ {home $testdir/C}
+ {local [lindex $ports 2]}
+ {remote localhost [lindex $ports 0]}
+ {open_env}
+ {start election}
+ }
+ set cmds [subst $cmds]
+ set c1 [open_site_prog [linsert $cmds 1 "output $testdir/c1output"]]
+ set c2 [open_site_prog [linsert $cmds 1 "output $testdir/c2output"]]
+
+ puts "\tRepmgr$tnum.b: Wait for an election to choose initial master."
+ set a [berkdb_env -home $testdir/A]
+ set b [berkdb_env -home $testdir/B]
+ set c [berkdb_env -home $testdir/C]
+ set sites "$a $b $c"
+ set menv [repmgr028_await_election $sites]
+ set i [lsearch -exact $sites $menv]
+ error_check_bad notfound $i -1
+ set site_names "abc"
+ set m [string range $site_names $i $i]
+ puts "\tRepmgr$tnum.c: (site $m is master)."
+
+ puts "\tRepmgr$tnum.d: Wait for other two sites to sync up."
+ set clients [lreplace $sites $i $i]
+ set site_names [string replace $site_names $i $i]
+ await_startup_done [lindex $clients 0]
+ await_startup_done [lindex $clients 1]
+
+ set m1 [subst $${m}1]
+ set m2 [subst $${m}2]
+
+ puts $m2 "open_db test.db"
+ puts $m2 "put key1 value1"
+ puts $m2 "echo done"
+ gets $m2
+
+ puts "\tRepmgr$tnum.e:\
+ Shut down master, wait for survivors to elect new master."
+ $menv close
+ close $m1
+ close $m2
+
+ set menv [repmgr028_await_election $clients]
+ set i [lsearch -exact $clients $menv]
+ error_check_bad notfound2 $i -1
+ set m [string range $site_names $i $i]
+ puts "\tRepmgr$tnum.f: (site $m is new master)."
+
+ puts "\tRepmgr$tnum.g: Wait for remaining client to sync to new master."
+ set client [lreplace $clients $i $i]
+ error_check_good master_changes \
+ [stat_field $client rep_stat "Master changes"] 2
+ await_startup_done $client
+
+ puts "\tRepmgr$tnum.h: Clean up."
+ $client close
+ $menv close
+
+ set c [string range $site_names 0 0]
+ close [subst $${c}1]
+ close [subst $${c}2]
+ set c [string range $site_names 1 1]
+ close [subst $${c}1]
+ close [subst $${c}2]
+}
+
+proc repmgr028_await_election { env_list } {
+ set cond {
+ foreach e $env_list {
+ if {[stat_field $e rep_stat "Role"] == "master"} {
+ set answer $e
+ break
+ }
+ }
+ expr {[info exists answer]}
+ }
+ await_condition {[eval $cond]} 20
+ return $answer
+}
diff --git a/db-4.8.30/test/repmgr029.tcl b/db-4.8.30/test/repmgr029.tcl
new file mode 100644
index 0000000..fdea674
--- /dev/null
+++ b/db-4.8.30/test/repmgr029.tcl
@@ -0,0 +1,122 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# TEST repmgr029
+# TEST Repmgr combined with replication-unaware process at master.
+
+proc repmgr029 { } {
+ source ./include.tcl
+
+ set tnum "029"
+ puts "Repmgr$tnum: Replication-unaware process at master."
+
+ env_cleanup $testdir
+ set ports [available_ports 2]
+ foreach {mport cport} $ports {}
+
+ file mkdir [set mdir $testdir/MASTER]
+ file mkdir [set cdir $testdir/CLIENT]
+
+ puts "\tRepmgr$tnum.a: Set up simple master/client pair."
+ make_dbconfig $mdir [set dbconfig {{rep_set_nsites 3}}]
+ set cmds {
+ "home $mdir"
+ "local $mport"
+ "output $testdir/moutput"
+ "open_env"
+ "start master"
+ "open_db test.db"
+ "put k1 v1"
+ "put k2 v2"
+ }
+ set m [open_site_prog [subst $cmds]]
+
+ make_dbconfig $cdir $dbconfig
+ set cmds {
+ "home $cdir"
+ "local $cport"
+ "output $testdir/coutput"
+ "remote localhost $mport"
+ "open_env"
+ "start client"
+ }
+ set c [open_site_prog [subst $cmds]]
+
+ puts "\tRepmgr$tnum.b: Wait for client to finish start-up."
+ set cenv [berkdb_env -home $cdir]
+ await_startup_done $cenv
+
+ puts "\tRepmgr$tnum.c: Run checkpoint in a separate process."
+ exec $util_path/db_checkpoint -h $mdir -1
+
+ # Find out where the checkpoint record is.
+ #
+ set menv [berkdb_env -home $mdir]
+ set curs [$menv log_cursor]
+ set ckp_lsn1 [lindex [$curs get -last] 0]
+
+ puts "\tRepmgr$tnum.d: Write more log records at master."
+ puts $m "put k3 v3"
+ puts $m "put k4 v4"
+ puts $m "echo done"
+ gets $m
+
+ puts "\tRepmgr$tnum.e: Do another checkpoint."
+ exec $util_path/db_checkpoint -h $mdir -1
+ set ckp_lsn2 [lindex [$curs get -last] 0]
+
+ error_check_bad same_ckp_lsn $ckp_lsn2 $ckp_lsn1
+
+ # db_checkpoint could have produced perm failures, because it doesn't
+ # start repmgr explicitly. Instead repmgr starts up automatically, on
+ # the fly, by trapping the first transmitted log record that gets sent.
+ # This causes a connection to be initiated, but that may take some time,
+ # too much time for that first log record to be transmitted. This means
+ # the client will have to request retransmission of this log record
+ # "gap".
+ #
+ # So, pause for a moment, to let replication's gap measurement algorithm
+ # expire, and then send one more transaction from the master, so that
+ # the client is forced to request the gap if necessary.
+ #
+ set perm_failures "Acknowledgement failures"
+ set pfs1 [stat_field $menv repmgr_stat $perm_failures]
+ tclsleep 1
+
+ puts $m "put k5 v5"
+ puts $m "echo done"
+ gets $m
+ set pfs2 [stat_field $menv repmgr_stat $perm_failures]
+
+ # The last "put" operation shouldn't have resulted in any additional
+ # perm failures.
+ #
+ error_check_good perm_fail $pfs2 $pfs1
+
+ # Pause again to allow time for the request for retransmission to be
+ # fulfilled.
+ #
+ tclsleep 1
+
+	# At this point, both checkpoint operations should have been
+	# successfully replicated. Examine the client-side log at the expected
+ # LSNs.
+ #
+ puts "\tRepmgr$tnum.f: Examine client log."
+ foreach lsn [list $ckp_lsn1 $ckp_lsn2] {
+ set lsnarg [join $lsn /]
+ set listing [exec $util_path/db_printlog \
+ -h $cdir -b $lsnarg -e $lsnarg]
+
+ set first_line [lindex [split $listing "\n"] 0]
+ error_check_good found_ckp \
+ [string match "*__txn_ckp*" $first_line] 1
+ }
+
+ $curs close
+ $cenv close
+ $menv close
+ close $c
+ close $m
+}
diff --git a/db-4.8.30/test/repmgr030.tcl b/db-4.8.30/test/repmgr030.tcl
new file mode 100644
index 0000000..105f2b0
--- /dev/null
+++ b/db-4.8.30/test/repmgr030.tcl
@@ -0,0 +1,97 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# TEST repmgr030
+# TEST Subordinate connections and processes should not trigger elections.
+
+proc repmgr030 { } {
+ source ./include.tcl
+
+ set tnum "030"
+ puts "Repmgr$tnum: Subordinate\
+ connections and processes should not trigger elections."
+
+ env_cleanup $testdir
+
+ foreach {mport cport} [available_ports 2] {}
+ file mkdir [set mdir $testdir/MASTER]
+ file mkdir [set cdir $testdir/CLIENT]
+
+ make_dbconfig $mdir [set dbconfig {{rep_set_nsites 3}}]
+ make_dbconfig $cdir $dbconfig
+
+ puts "\tRepmgr$tnum.a: Set up a pair of sites, two processes each."
+ set cmds {
+ "home $mdir"
+ "local $mport"
+ "output $testdir/m1output"
+ "open_env"
+ "start master"
+ }
+ set m1 [open_site_prog [subst $cmds]]
+
+ set cmds {
+ "home $mdir"
+ "local $mport"
+ "output $testdir/m2output"
+ "open_env"
+ "start master"
+ }
+ set m2 [open_site_prog [subst $cmds]]
+
+ # Force subordinate client process to be the one to inform master of its
+ # address, to be sure there's a connection. This shouldn't be
+ # necessary, but it's hard to verify this in a test.
+ #
+ set cmds {
+ "home $cdir"
+ "local $cport"
+ "output $testdir/c1output"
+ "open_env"
+ "start client"
+ }
+ set c1 [open_site_prog [subst $cmds]]
+
+ set cmds {
+ "home $cdir"
+ "local $cport"
+ "output $testdir/c2output"
+ "remote localhost $mport"
+ "open_env"
+ "start client"
+ }
+ set c2 [open_site_prog [subst $cmds]]
+
+ set cenv [berkdb_env -home $cdir]
+ await_startup_done $cenv
+
+ puts "\tRepmgr$tnum.b: Stop master's subordinate process (pause)."
+ close $m2
+
+ # Pause to let client notice the connection loss.
+ tclsleep 3
+
+ # The client main process is still running, but it shouldn't care about
+ # a connection loss to the master's subordinate process.
+
+ puts "\tRepmgr$tnum.c:\
+ Stop client's main process, then master's main process (pause)."
+ close $c1
+ tclsleep 2
+ close $m1
+ tclsleep 3
+
+ # If the client main process were still running, it would have reacted
+ # to the loss of the master by calling for an election. However, with
+	# only the client subordinate process still running, it cannot call for
+ # an election. So, we should see no elections ever having been
+ # started.
+ #
+ set election_count [stat_field $cenv rep_stat "Elections held"]
+ puts "\tRepmgr$tnum.d: Check election count ($election_count)."
+ error_check_good no_elections $election_count 0
+
+ $cenv close
+ close $c2
+}
diff --git a/db-4.8.30/test/repmgr031.tcl b/db-4.8.30/test/repmgr031.tcl
new file mode 100644
index 0000000..f50a179
--- /dev/null
+++ b/db-4.8.30/test/repmgr031.tcl
@@ -0,0 +1,209 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# TEST repmgr031
+# TEST Test repmgr's internal juggling of peer EID's.
+# TEST
+# TEST Set up master and 2 clients, A and B.
+# TEST Add a third client (C), with two processes.
+# TEST The first process will be configured to know about A.
+# TEST The second process will know about B, and set that as peer,
+# TEST but when it joins the env site B will have to be shuffled
+# TEST into a later position in the list, because A is already first.
+
+# This whole test is highly dependent upon the internal implementation structure
+# of repmgr's multi-process support. If that implementation changes, this test
+# may become irrelevant, irrational, and inconsequential. If that happens, it
+# makes sense to simply discard this test.
+
+proc repmgr031 { } {
+ foreach initial_action {true false} {
+ foreach while_active {true false} {
+ repmgr031_sub $initial_action $while_active
+ }
+ }
+}
+
+proc repmgr031_sub { {a_too false} {while_active true} } {
+ source ./include.tcl
+
+ if {$a_too} {
+ set part1 "shuffle with peer reassignment"
+ } else {
+ set part1 "shuffle"
+ }
+ if {$while_active} {
+ set part2 "while active"
+ } else {
+ set part2 "while not active"
+ }
+ set tnum "031"
+ puts "Repmgr$tnum: ($part1, then peer change $part2)"
+
+ env_cleanup $testdir
+ foreach {mport aport bport cport} [available_ports 4] {}
+ file mkdir [set dirm $testdir/M]
+ file mkdir [set dira $testdir/A]
+ file mkdir [set dirb $testdir/B]
+ file mkdir [set dirc $testdir/C]
+
+ set dbc {{repmgr_set_ack_policy DB_REPMGR_ACKS_ALL}}
+ make_dbconfig $dirm $dbc
+ make_dbconfig $dira $dbc
+ make_dbconfig $dirb $dbc
+ make_dbconfig $dirc $dbc
+
+ puts "\tRepmgr$tnum.a: Create a master and first two clients."
+ set cmds {
+ "home $dirm"
+ "local $mport"
+ "output $testdir/moutput"
+ "open_env"
+ "start master"
+ }
+ set m [open_site_prog [subst $cmds]]
+
+ set cmds {
+ "home $dira"
+ "local $aport"
+ "output $testdir/aoutput"
+ "remote localhost $mport"
+ "open_env"
+ "start client"
+ }
+ set a [open_site_prog [subst $cmds]]
+
+ set cmds {
+ "home $dirb"
+ "local $bport"
+ "output $testdir/boutput"
+ "remote localhost $mport"
+ "open_env"
+ "start client"
+ }
+ set b [open_site_prog [subst $cmds]]
+
+ set aenv [berkdb_env -home $dira]
+ await_startup_done $aenv
+ set benv [berkdb_env -home $dirb]
+ await_startup_done $benv
+
+ # Now it gets interesting.
+ puts "\tRepmgr$tnum.b: Create client C, with two processes."
+ if {$a_too} {
+ set peer_flag "-p"
+ } else {
+ set peer_flag ""
+ }
+ set cmds {
+ "home $dirc"
+ "local $cport"
+ "output $testdir/c1output"
+ "remote $peer_flag localhost $aport"
+ "remote localhost $mport"
+ "open_env"
+ }
+ set c1 [open_site_prog [subst $cmds]]
+
+ set cmds {
+ "home $dirc"
+ "local $cport"
+ "output $testdir/c2output"
+ "remote -p localhost $bport"
+ "open_env"
+ }
+ set c2 [open_site_prog [subst $cmds]]
+
+ puts $c1 "start client"
+ gets $c1
+ set cenv [berkdb_env -home $dirc]
+ await_startup_done $cenv 10
+
+ puts "\tRepmgr$tnum.c: Check resulting statistics."
+ # Make sure we used B, not A, as the c2c peer.
+ set requests_at_A [repmgr031_get_request_count $aenv]
+ set requests_at_B [repmgr031_get_request_count $benv]
+ error_check_good no_requests_at_A $requests_at_A 0
+ error_check_bad some_requests_at_B $requests_at_B 0
+
+ # Check that site list order is what we expect.
+ set sl [$cenv repmgr_site_list]
+ error_check_good site_list [lindex $sl 0 2] $aport
+ error_check_good site_list [lindex $sl 1 2] $mport
+ error_check_good site_list [lindex $sl 2 2] $bport
+
+
+ # Give client C a reason to send another request: shut it down, and
+ # create some new transactions at the master.
+ #
+ puts $c2 "exit"
+ gets $c2
+ close $c2
+ puts $c1 "exit"
+ gets $c1
+ close $c1
+
+ puts $m "open_db test.db"
+ puts $m "put k1 v1"
+ puts $m "put k2 v2"
+ puts $m "echo done"
+ gets $m
+
+ # Change peer setting at C.
+ #
+ puts "\tRepmgr$tnum.d: Start client C again."
+ if { $while_active } {
+ set cmds {
+ "home $dirc"
+ "output $testdir/c1output2"
+ "open_env"
+ "remote -p localhost $aport"
+ "start client"
+ }
+ } else {
+ set cmds {
+ "home $dirc"
+ "output $testdir/c1output2"
+ "remote -p localhost $aport"
+ "open_env"
+ "start client"
+ }
+ }
+ set c [open_site_prog [subst $cmds]]
+
+ # Wait for restarted client to catch up with master.
+ set menv [berkdb_env -home $dirm]
+ set seq 0
+ set cond {
+ incr seq
+ puts $m "put newkey$seq newdata$seq"
+ puts $m "echo done"
+ gets $m
+ set log_end [next_expected_lsn $menv]
+ set client_log_end [next_expected_lsn $cenv]
+ expr [string compare $client_log_end $log_end] == 0
+ }
+ await_condition {[eval $cond]}
+
+ # Make sure client B has not serviced any more requests, and that
+ # instead now client A has serviced some.
+
+ error_check_good no_addl_reqs \
+ [repmgr031_get_request_count $benv] $requests_at_B
+ error_check_bad some_requests_at_A [repmgr031_get_request_count $aenv] 0
+
+ $cenv close
+ $benv close
+ $aenv close
+ $menv close
+
+ close $c
+ close $a
+ close $b
+ close $m
+}
+
+proc repmgr031_get_request_count { env } {
+ stat_field $env rep_stat "Client service requests"
+}
diff --git a/db-4.8.30/test/repmgr032.tcl b/db-4.8.30/test/repmgr032.tcl
new file mode 100644
index 0000000..f76dfcc
--- /dev/null
+++ b/db-4.8.30/test/repmgr032.tcl
@@ -0,0 +1,198 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009 Oracle. All rights reserved.
+#
+# TEST repmgr032
+# TEST Multi-process repmgr start-up policies.
+#
+
+proc repmgr032 { } {
+ source ./include.tcl
+
+ set tnum "032"
+ puts "Repmgr$tnum: repmgr multi-process start policies."
+
+ env_cleanup $testdir
+
+ file mkdir [set dira $testdir/A]
+ file mkdir [set dirb $testdir/B]
+ file mkdir [set dirc $testdir/C]
+
+ set conf {
+ {rep_set_nsites 3}
+ {rep_set_timeout DB_REP_ELECTION_RETRY 3000000}
+ }
+ make_dbconfig $dira $conf
+ make_dbconfig $dirb $conf
+ make_dbconfig $dirc $conf
+ foreach {aport bport cport} [available_ports 3] {}
+
+ puts "\tRepmgr$tnum.a: Create a master and client."
+ set cmds {
+ "home $dira"
+ "local $aport"
+ "output $testdir/a1output1"
+ "open_env"
+ "start master"
+ }
+ set a [open_site_prog [subst $cmds]]
+
+ set cmds {
+ "home $dirb"
+ "local $bport"
+ "output $testdir/boutput1"
+ "remote localhost $aport"
+ "open_env"
+ "start client"
+ }
+ set b [open_site_prog [subst $cmds]]
+
+ puts "\tRepmgr$tnum.b: Wait for start-up done at client."
+ set benv [berkdb_env -home $dirb]
+ await_startup_done $benv
+
+ puts $a "open_db test.db"
+ puts $a "put key1 val1"
+ puts $a "echo done"
+ gets $a
+
+ #
+ # 1. shutting down client and restarting as client should do nothing
+ #
+ puts "\tRepmgr$tnum.c: Shut down client, restart as client."
+ set elections0 [stat_field $benv rep_stat "Elections held"]
+ set aenv [berkdb_env -home $dira]
+ set requests0 [stat_field $aenv rep_stat "Client service requests"]
+ puts $b "exit"
+ gets $b
+ error_check_good eof1 [eof $b] 1
+ close $b
+ set cmds {
+ "home $dirb"
+ "local $bport"
+ "output $testdir/boutput1"
+ "remote localhost $aport"
+ "open_env"
+ "start election"
+ }
+ set b [open_site_prog [subst $cmds]]
+ error_check_good already_startedup \
+ [stat_field $benv rep_stat "Startup complete"] 1
+ puts "\tRepmgr$tnum.d: Pause 20 seconds to check for start-up activity"
+ tclsleep 20
+ error_check_good no_more_requests \
+ [stat_field $aenv rep_stat "Client service requests"] $requests0
+	error_check_good no_more_elections \
+ [stat_field $benv rep_stat "Elections held"] $elections0
+
+ #
+ # 2. Start policy should be ignored if there's already a listener
+ # running in a separate process.
+ #
+
+	# Start a second process at master. Even though it specifies "election"
+ # as its start policy, the fact that a listener is already running
+ # should force it to continue as master (IMHO).
+
+ puts "\tRepmgr$tnum.e: Second master process accepts existing role"
+ set cmds {
+ "home $dira"
+ "local $aport"
+ "output $testdir/a2output1"
+ "open_env"
+ "start election"
+ }
+ set a2 [open_site_prog [subst $cmds]]
+
+ # Make sure we still seem to be master, by checking stats, and by trying
+ # to write a new transaction.
+ #
+ error_check_good still_master \
+ [stat_field $aenv rep_stat "Role"] "master"
+
+ puts $a2 "open_db test.db"
+ puts $a2 "put key2 val2"
+ puts $a2 "echo done"
+ gets $a2
+
+ #
+ # 3. Specifying MASTER start policy results in rep_start(MASTER), no
+ # matter what happened previously.
+ #
+ puts "\tRepmgr$tnum.f: Restart master as master."
+
+ puts $a "exit"
+ gets $a
+ error_check_good eof2 [eof $a] 1
+ close $a
+ puts $a2 "exit"
+ gets $a2
+ error_check_good eof3 [eof $a2] 1
+ close $a2
+
+ set initial_gen [stat_field $aenv rep_stat "Generation number"]
+ set cmds {
+ "home $dira"
+ "local $aport"
+ "output $testdir/a2output2"
+ "remote localhost $bport"
+ "open_env"
+ "start master"
+ }
+ set a [open_site_prog [subst $cmds]]
+
+ # Since we were already master, the gen number shouldn't change.
+ error_check_good same_gen \
+ [stat_field $aenv rep_stat "Generation number"] $initial_gen
+
+ puts $a "exit"
+ gets $a
+ error_check_good eof4 [eof $a] 1
+ close $a
+ puts $b "exit"
+ gets $b
+ error_check_good eof5 [eof $b] 1
+ close $b
+
+ puts "\tRepmgr$tnum.g: Restart client as master."
+ # Note that site A is not running at this point.
+ set cmds {
+ "home $dirb"
+ "local $bport"
+ "output $testdir/boutput3"
+ "open_env"
+ "start master"
+ }
+ set b [open_site_prog [subst $cmds]]
+ set gen [stat_field $benv rep_stat "Generation number"]
+ error_check_good bumped_gen [expr $gen > $initial_gen] 1
+
+
+ #
+ # 4. Specifying CLIENT when we were MASTER causes a change
+ #
+ puts $b "exit"
+ gets $b
+ error_check_good eof6 [eof $b] 1
+ close $b
+ $benv close
+ exec $util_path/db_recover -h $dirb
+
+ puts "\tRepmgr$tnum.h: Restart master as client"
+ set initial_value [stat_field $aenv rep_stat "Elections held"]
+ set cmds {
+ "home $dira"
+ "local $aport"
+ "output $testdir/aoutput4"
+ "open_env"
+ "start election"
+ }
+ set a [open_site_prog [subst $cmds]]
+ puts "\tRepmgr$tnum.i: Pause for 10 seconds to wait for elections."
+ tclsleep 10
+ set elections [stat_field $aenv rep_stat "Elections held"]
+	error_check_good more_elections [expr $elections > $initial_value] 1
+
+ $aenv close
+ close $a
+}
diff --git a/db-4.8.30/test/reputils.tcl b/db-4.8.30/test/reputils.tcl
new file mode 100644
index 0000000..8b784d6
--- /dev/null
+++ b/db-4.8.30/test/reputils.tcl
@@ -0,0 +1,2743 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Replication testing utilities
+
+# Environment handle for the env containing the replication "communications
+# structure" (really a CDB environment).
+
+# The test environment consists of a queue and a directory (environment)
+# per replication site. The queue is used to hold messages destined for a
+# particular site and the directory will contain the environment for the
+# site. So the environment looks like:
+# $testdir
+# ___________|______________________________
+# / | \ \
+# MSGQUEUEDIR MASTERDIR CLIENTDIR.0 ... CLIENTDIR.N-1
+# | | ... |
+# 1 2 .. N+1
+#
+# The master is site 1 in the MSGQUEUEDIR and clients 1-N map to message
+# queues 2 - N+1.
+#
+# The global repenv(master) contains the master's environment handle, and
+# repenv(0) through repenv(N-1) contain the client handles; repenv(N) is
+# set to NULL as a terminator.
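+#
+# For example, a hypothetical 1-master/2-client run maps out as:
+#
+#	$testdir/MSGQUEUEDIR	queue env; queue 1 = master, queues 2, 3 = clients
+#	$testdir/MASTERDIR	master env, handle in repenv(master)
+#	$testdir/CLIENTDIR.0	client 0 env, handle in repenv(0), queue id 2
+#	$testdir/CLIENTDIR.1	client 1 env, handle in repenv(1), queue id 3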
+
+
+# queuedbs is an array of DB handles, one per machine ID, for the queue
+# databases that hold the messages destined for that machine.  Each queued
+# record stores the control, rec, and sending machine ID, so the sender
+# travels in the data rather than in the array index (for example,
+# queuedbs(1) holds all messages addressed to machid 1).
+#
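+# A minimal sketch (hypothetical machine IDs) of how the procs defined
+# later in this file use the array:
+#
+#	repladd 1		;# creates queuedbs(1), the master's queue
+#	repladd 2		;# creates queuedbs(2), a client's queue
+#	replsend $control $rec 1 2 $flags $lsn
+#				;# appends {control rec 1} to queuedbs(2)
+#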
+global queuedbs
+global machids
+global perm_response_list
+set perm_response_list {}
+global perm_sent_list
+set perm_sent_list {}
+global elect_timeout
+unset -nocomplain elect_timeout
+set elect_timeout(default) 5000000
+global electable_pri
+set electable_pri 5
+set drop 0
+global anywhere
+set anywhere 0
+
+global rep_verbose
+set rep_verbose 0
+global verbose_type
+set verbose_type "rep"
+
+# To run a replication test with verbose messages, type
+# 'run_verbose' and then the usual test command string enclosed
+# in double quotes or curly braces. For example:
+#
+# run_verbose "rep001 btree"
+#
+# run_verbose {run_repmethod btree test001}
+#
+# To run a replication test with one of the subsets of verbose
+# messages, use the same syntax with 'run_verbose_elect',
+# 'run_verbose_lease', etc.
+
+proc run_verbose { commandstring } {
+ global verbose_type
+ set verbose_type "rep"
+ run_verb $commandstring
+}
+
+proc run_verbose_elect { commandstring } {
+ global verbose_type
+ set verbose_type "rep_elect"
+ run_verb $commandstring
+}
+
+proc run_verbose_lease { commandstring } {
+ global verbose_type
+ set verbose_type "rep_lease"
+ run_verb $commandstring
+}
+
+proc run_verbose_misc { commandstring } {
+ global verbose_type
+ set verbose_type "rep_misc"
+ run_verb $commandstring
+}
+
+proc run_verbose_msgs { commandstring } {
+ global verbose_type
+ set verbose_type "rep_msgs"
+ run_verb $commandstring
+}
+
+proc run_verbose_sync { commandstring } {
+ global verbose_type
+ set verbose_type "rep_sync"
+ run_verb $commandstring
+}
+
+proc run_verbose_test { commandstring } {
+ global verbose_type
+ set verbose_type "rep_test"
+ run_verb $commandstring
+}
+
+proc run_verbose_repmgr_misc { commandstring } {
+ global verbose_type
+ set verbose_type "repmgr_misc"
+ run_verb $commandstring
+}
+
+proc run_verb { commandstring } {
+ global rep_verbose
+ global verbose_type
+
+ set rep_verbose 1
+ if { [catch {
+ eval $commandstring
+ flush stdout
+ flush stderr
+ } res] != 0 } {
+ global errorInfo
+
+ set rep_verbose 0
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_verbose: $commandstring: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set rep_verbose 0
+}
+
+# Databases are on-disk by default for replication testing.
+# Some replication tests have been converted to run with databases
+# in memory instead.
+
+global databases_in_memory
+set databases_in_memory 0
+
+proc run_inmem_db { test method } {
+ run_inmem $test $method 1 0 0 0
+}
+
+# Replication files are on-disk by default for replication testing.
+# Some replication tests have been converted to run with rep files
+# in memory instead.
+
+global repfiles_in_memory
+set repfiles_in_memory 0
+
+proc run_inmem_rep { test method } {
+ run_inmem $test $method 0 0 1 0
+}
+
+# Region files are on-disk by default for replication testing.
+# Replication tests can force the region files in-memory by setting
+# the -private flag when opening an env.
+
+global env_private
+set env_private 0
+
+proc run_env_private { test method } {
+ run_inmem $test $method 0 0 0 1
+}
+
+# Logs are on-disk by default for replication testing.
+# Mixed-mode log testing provides a mixture of on-disk and
+# in-memory logging, or even all in-memory.  When running a
+# 1-master/1-client test, we try all four combinations.  On a test
+# with more clients, we still try four options, randomly
+# selecting whether each later client logs on-disk or in-memory.
+#
+
+global mixed_mode_logging
+set mixed_mode_logging 0
+
+proc create_logsets { nsites } {
+ global mixed_mode_logging
+ global logsets
+ global rand_init
+
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+ if { $mixed_mode_logging == 0 || $mixed_mode_logging == 2 } {
+ if { $mixed_mode_logging == 0 } {
+ set logmode "on-disk"
+ } else {
+ set logmode "in-memory"
+ }
+ set loglist {}
+ for { set i 0 } { $i < $nsites } { incr i } {
+ lappend loglist $logmode
+ }
+ set logsets [list $loglist]
+ }
+ if { $mixed_mode_logging == 1 } {
+ set set1 {on-disk on-disk}
+ set set2 {on-disk in-memory}
+ set set3 {in-memory on-disk}
+ set set4 {in-memory in-memory}
+
+ # Start with nsites at 2 since we already set up
+ # the master and first client.
+ for { set i 2 } { $i < $nsites } { incr i } {
+ foreach set { set1 set2 set3 set4 } {
+ if { [berkdb random_int 0 1] == 0 } {
+ lappend $set "on-disk"
+ } else {
+ lappend $set "in-memory"
+ }
+ }
+ }
+ set logsets [list $set1 $set2 $set3 $set4]
+ }
+ return $logsets
+}
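+
+# For example (a hedged illustration; the later entries are randomized),
+# with mixed_mode_logging set to 1 and nsites 3, create_logsets might
+# return:
+#
+#	{on-disk on-disk in-memory} {on-disk in-memory on-disk}
+#	{in-memory on-disk in-memory} {in-memory in-memory on-disk}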
+
+proc run_inmem_log { test method } {
+ run_inmem $test $method 0 1 0 0
+}
+
+# Run_mixedmode_log is a little different from the other run_inmem procs:
+# it provides a mixture of in-memory and on-disk logging on the different
+# hosts in a replication group.
+proc run_mixedmode_log { test method {display 0} {run 1} \
+ {outfile stdout} {largs ""} } {
+ global mixed_mode_logging
+ set mixed_mode_logging 1
+
+ set prefix [string range $test 0 2]
+ if { $prefix != "rep" } {
+ puts "Skipping mixed-mode log testing for non-rep test."
+ set mixed_mode_logging 0
+ return
+ }
+
+ eval run_method $method $test $display $run $outfile $largs
+
+ # Reset to default values after run.
+ set mixed_mode_logging 0
+}
+
+# The procs run_inmem_db, run_inmem_log, run_inmem_rep, and run_env_private
+# put databases, logs, rep files, or region files in-memory. (Setting up
+# an env with the -private flag puts region files in memory.)
+# The proc run_inmem allows you to put any or all of these in-memory
+# at the same time.
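+#
+# For example, a hypothetical invocation that puts databases and logs in
+# memory while leaving rep files and region files on disk would be:
+#
+#	run_inmem rep001 btree 1 1 0 0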
+
+proc run_inmem { test method\
+ {dbinmem 1} {logsinmem 1} {repinmem 1} {envprivate 1} } {
+
+ set prefix [string range $test 0 2]
+ if { $prefix != "rep" } {
+ puts "Skipping in-memory testing for non-rep test."
+ return
+ }
+ global databases_in_memory
+ global mixed_mode_logging
+ global repfiles_in_memory
+ global env_private
+ global test_names
+
+ if { $dbinmem } {
+ if { [is_substr $test_names(rep_inmem) $test] == 0 } {
+ puts "Test $test does not support in-memory databases."
+ puts "Putting databases on-disk."
+ set databases_in_memory 0
+ } else {
+ set databases_in_memory 1
+ }
+ }
+ if { $logsinmem } {
+ set mixed_mode_logging 2
+ }
+ if { $repinmem } {
+ set repfiles_in_memory 1
+ }
+ if { $envprivate } {
+ set env_private 1
+ }
+
+ if { [catch {eval run_method $method $test} res] } {
+ set databases_in_memory 0
+ set mixed_mode_logging 0
+ set repfiles_in_memory 0
+ set env_private 0
+ puts "FAIL: $res"
+ }
+
+ set databases_in_memory 0
+ set mixed_mode_logging 0
+ set repfiles_in_memory 0
+ set env_private 0
+}
+
+# The proc run_diskless runs run_inmem with its default values.
+# It's useful to have this name to remind us of its testing purpose,
+# which is to mimic a diskless host.
+
+proc run_diskless { test method } {
+ run_inmem $test $method 1 1 1 1
+}
+
+# Open the master and client environments and store them in the global
+# repenv array.  Return largs with the master's environment appended
+# ("-env $masterenv").
+proc repl_envsetup { envargs largs test {nclients 1} {droppct 0} { oob 0 } } {
+ source ./include.tcl
+ global clientdir
+ global drop drop_msg
+ global masterdir
+ global repenv
+ global rep_verbose
+ global verbose_type
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on}"
+ }
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ if { $droppct != 0 } {
+ set drop 1
+ set drop_msg [expr 100 / $droppct]
+ } else {
+ set drop 0
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ # Open a master.
+ repladd 1
+ #
+ # Set log smaller than default to force changing files,
+ # but big enough so that the tests that use binary files
+ # as keys/data can run. Increase the size of the log region --
+ # sdb004 needs this, now that subdatabase names are stored
+ # in the env region.
+ #
+ set logmax [expr 3 * 1024 * 1024]
+ set lockmax 40000
+ set logregion 2097152
+
+ set ma_cmd "berkdb_env_noerr -create -log_max $logmax $envargs \
+ -cachesize { 0 4194304 1 } -log_regionmax $logregion \
+ -lock_max_objects $lockmax -lock_max_locks $lockmax \
+ -errpfx $masterdir $verbargs \
+ -home $masterdir -txn nosync -rep_master -rep_transport \
+ \[list 1 replsend\]"
+ set masterenv [eval $ma_cmd]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+ set repenv(master) $masterenv
+
+ # Open clients
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set cl_cmd "berkdb_env_noerr -create $envargs -txn nosync \
+ -cachesize { 0 10000000 0 } -log_regionmax $logregion \
+ -lock_max_objects $lockmax -lock_max_locks $lockmax \
+ -errpfx $clientdir($i) $verbargs \
+ -home $clientdir($i) -rep_client -rep_transport \
+ \[list $envid replsend\]"
+ set clientenv [eval $cl_cmd]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ set repenv($i) $clientenv
+ }
+ set repenv($i) NULL
+ append largs " -env $masterenv "
+
+ # Process startup messages
+ repl_envprocq $test $nclients $oob
+
+ # Clobber replication's 30-second anti-archive timer, which
+ # will have been started by client sync-up internal init, in
+ # case the test we're about to run wants to do any log
+ # archiving, or database renaming and/or removal.
+ $masterenv test force noarchive_timeout
+
+ return $largs
+}
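+
+# A minimal wiring sketch (hypothetical values) of how a test typically
+# combines repl_envsetup with the other procs in this file: open the
+# group, run some transactions against the master, drain the message
+# queues, then verify and close.
+#
+#	set largs [repl_envsetup $envargs $largs $test 2]
+#	eval rep_test $method $repenv(master) NULL 1000 0 0 0 $largs
+#	repl_envprocq $test 2
+#	repl_envver0 $test $method 2
+#	repl_envclose $test $envargs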
+
+# Process all incoming messages. Iterate until there are no messages left
+# in anyone's queue so that we capture all message exchanges. We verify that
+# the requested number of clients matches the number of client environments
+# we have. The oob parameter indicates if we should process the queue
+# with out-of-order delivery.  The replprocessqueue procedure actually does
+# the real work of processing the queue -- this routine simply iterates
+# over the various queues and does the initial setup.
+proc repl_envprocq { test { nclients 1 } { oob 0 }} {
+ global repenv
+ global drop
+
+ set masterenv $repenv(master)
+ for { set i 0 } { 1 } { incr i } {
+ if { $repenv($i) == "NULL"} {
+ break
+ }
+ }
+ error_check_good i_nclients $nclients $i
+
+ berkdb debug_check
+ puts -nonewline "\t$test: Processing master/$i client queues"
+ set rand_skip 0
+ if { $oob } {
+ puts " out-of-order"
+ } else {
+ puts " in order"
+ }
+ set droprestore $drop
+ while { 1 } {
+ set nproced 0
+
+ if { $oob } {
+ set rand_skip [berkdb random_int 2 10]
+ }
+ incr nproced [replprocessqueue $masterenv 1 $rand_skip]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ if { $oob } {
+ set rand_skip [berkdb random_int 2 10]
+ }
+ set n [replprocessqueue $repenv($i) \
+ $envid $rand_skip]
+ incr nproced $n
+ }
+
+ if { $nproced == 0 } {
+ # Now that we delay requesting records until
+ # we've had a few records go by, we should always
+ # see that the number of requests is lower than the
+ # number of messages that were enqueued.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientenv $repenv($i)
+ set queued [stat_field $clientenv rep_stat \
+ "Total log records queued"]
+ error_check_bad queued_stats \
+ $queued -1
+ set requested [stat_field $clientenv rep_stat \
+ "Log records requested"]
+ error_check_bad requested_stats \
+ $requested -1
+
+ #
+				# Set to 100 usecs.  An average ping
+				# to localhost should be a few tens of usecs.
+ #
+ $clientenv rep_request 100 400
+ }
+
+ # If we were dropping messages, we might need
+ # to flush the log so that we get everything
+ # and end up in the right state.
+ if { $drop != 0 } {
+ set drop 0
+ $masterenv rep_flush
+ berkdb debug_check
+ puts "\t$test: Flushing Master"
+ } else {
+ break
+ }
+ }
+ }
+
+ # Reset the clients back to the default state in case we
+ # have more processing to do.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientenv $repenv($i)
+ $clientenv rep_request 40000 1280000
+ }
+ set drop $droprestore
+}
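+
+# For example, to drain the queues again with out-of-order delivery for a
+# hypothetical 2-client group:
+#
+#	repl_envprocq $test 2 1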
+
+# Verify that the directories in the master are exactly replicated in
+# each of the client environments.
+proc repl_envver0 { test method { nclients 1 } } {
+ global clientdir
+ global masterdir
+ global repenv
+
+ # Verify the database in the client dir.
+ # First dump the master.
+ set t1 $masterdir/t1
+ set t2 $masterdir/t2
+ set t3 $masterdir/t3
+ set omethod [convert_method $method]
+
+ #
+ # We are interested in the keys of whatever databases are present
+ # in the master environment, so we just call a no-op check function
+# since we have no idea what the contents of these databases really are.
+ # We just need to walk the master and the clients and make sure they
+ # have the same contents.
+ #
+ set cwd [pwd]
+ cd $masterdir
+ set stat [catch {glob test*.db} dbs]
+ cd $cwd
+ if { $stat == 1 } {
+ return
+ }
+ foreach testfile $dbs {
+ open_and_dump_file $testfile $repenv(master) $masterdir/t2 \
+ repl_noop dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ }
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\t$test: Verifying client $i database $testfile contents."
+ open_and_dump_file $testfile $repenv($i) \
+ $t1 repl_noop dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ } else {
+ catch {file copy -force $t1 $t3} ret
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+ }
+ }
+}
+
+# Remove all the elements from the master and verify that these
+# deletions properly propagated to the clients.
+proc repl_verdel { test method { nclients 1 } } {
+ global clientdir
+ global masterdir
+ global repenv
+
+ # Delete all items in the master.
+ set cwd [pwd]
+ cd $masterdir
+ set stat [catch {glob test*.db} dbs]
+ cd $cwd
+ if { $stat == 1 } {
+ return
+ }
+ foreach testfile $dbs {
+ puts "\t$test: Deleting all items from the master."
+ set txn [$repenv(master) txn]
+ error_check_good txn_begin [is_valid_txn $txn \
+ $repenv(master)] TRUE
+ set db [eval berkdb_open -txn $txn -env $repenv(master) \
+ $testfile]
+ error_check_good reopen_master [is_valid_db $db] TRUE
+ set dbc [$db cursor -txn $txn]
+ error_check_good reopen_master_cursor \
+ [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -next] } {
+ error_check_good del_item [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ repl_envprocq $test $nclients
+
+ # Check clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\t$test: Verifying client database $i is empty."
+
+ set db [eval berkdb_open -env $repenv($i) $testfile]
+ error_check_good reopen_client($i) \
+ [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good reopen_client_cursor($i) \
+ [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good client($i)_empty \
+ [llength [$dbc get -first]] 0
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ }
+}
+
+# Replication "check" function for the dump procs that expect to
+# be able to verify the keys and data.
+proc repl_noop { k d } {
+ return
+}
+
+# Close all the master and client environments in a replication test directory.
+proc repl_envclose { test envargs } {
+ source ./include.tcl
+ global clientdir
+ global encrypt
+ global masterdir
+ global repenv
+ global drop
+
+	if { [lsearch $envargs "-encrypta*"] != -1 } {
+ set encrypt 1
+ }
+
+ # In order to make sure that we have fully-synced and ready-to-verify
+ # databases on all the clients, do a checkpoint on the master and
+ # process messages in order to flush all the clients.
+ set drop 0
+ berkdb debug_check
+ puts "\t$test: Checkpointing master."
+ error_check_good masterenv_ckp [$repenv(master) txn_checkpoint] 0
+
+ # Count clients.
+ for { set ncli 0 } { 1 } { incr ncli } {
+ if { $repenv($ncli) == "NULL" } {
+ break
+ }
+ $repenv($ncli) rep_request 100 100
+ }
+ repl_envprocq $test $ncli
+
+ error_check_good masterenv_close [$repenv(master) close] 0
+ verify_dir $masterdir "\t$test: " 0 0 1
+ for { set i 0 } { $i < $ncli } { incr i } {
+ error_check_good client($i)_close [$repenv($i) close] 0
+ verify_dir $clientdir($i) "\t$test: " 0 0 1
+ }
+ replclose $testdir/MSGQUEUEDIR
+
+}
+
+# Replnoop is a dummy function to substitute for replsend
+# when replication is off.
+proc replnoop { control rec fromid toid flags lsn } {
+ return 0
+}
+
+proc replclose { queuedir } {
+ global queueenv queuedbs machids
+
+ foreach m $machids {
+ set db $queuedbs($m)
+ error_check_good dbr_close [$db close] 0
+ }
+ error_check_good qenv_close [$queueenv close] 0
+ set machids {}
+}
+
+# Create a replication group for testing.
+proc replsetup { queuedir } {
+ global queueenv queuedbs machids
+
+ file mkdir $queuedir
+ set max_locks 20000
+ set queueenv [berkdb_env \
+ -create -txn nosync -lock_max_locks $max_locks -home $queuedir]
+ error_check_good queueenv [is_valid_env $queueenv] TRUE
+
+ if { [info exists queuedbs] } {
+ unset queuedbs
+ }
+ set machids {}
+
+ return $queueenv
+}
+
+# Send function for replication.
+proc replsend { control rec fromid toid flags lsn } {
+ global queuedbs queueenv machids
+ global drop drop_msg
+ global perm_sent_list
+ global anywhere
+
+ set permflags [lsearch $flags "perm"]
+ if { [llength $perm_sent_list] != 0 && $permflags != -1 } {
+# puts "replsend sent perm message, LSN $lsn"
+ lappend perm_sent_list $lsn
+ }
+
+ #
+ # If we are testing with dropped messages, then we drop every
+ # $drop_msg time. If we do that just return 0 and don't do
+ # anything.
+ #
+ if { $drop != 0 } {
+ incr drop
+ if { $drop == $drop_msg } {
+ set drop 1
+ return 0
+ }
+ }
+ # XXX
+ # -1 is DB_BROADCAST_EID
+ if { $toid == -1 } {
+ set machlist $machids
+ } else {
+ if { [info exists queuedbs($toid)] != 1 } {
+ error "replsend: machid $toid not found"
+ }
+ set m NULL
+ if { $anywhere != 0 } {
+ #
+ # If we can send this anywhere, send it to the first
+			# id we find that is neither toid nor fromid.
+ #
+ set anyflags [lsearch $flags "any"]
+ if { $anyflags != -1 } {
+ foreach m $machids {
+ if { $m == $fromid || $m == $toid } {
+ continue
+ }
+ set machlist [list $m]
+ break
+ }
+ }
+ }
+ #
+ # If we didn't find a different site, then we must
+ # fallback to the toid.
+ #
+ if { $m == "NULL" } {
+ set machlist [list $toid]
+ }
+ }
+
+ foreach m $machlist {
+ # do not broadcast to self.
+ if { $m == $fromid } {
+ continue
+ }
+
+ set db $queuedbs($m)
+ set txn [$queueenv txn]
+ $db put -txn $txn -append [list $control $rec $fromid]
+ error_check_good replsend_commit [$txn commit] 0
+ }
+
+ queue_logcheck
+ return 0
+}
+
+#
+# If the message queue log files are getting too numerous, checkpoint
+# and archive them. Some tests are so large (particularly from
+# run_repmethod) that they can consume far too much disk space.
+proc queue_logcheck { } {
+ global queueenv
+
+ set logs [$queueenv log_archive -arch_log]
+ set numlogs [llength $logs]
+ if { $numlogs > 10 } {
+ $queueenv txn_checkpoint
+ $queueenv log_archive -arch_remove
+ }
+}
+
+# Discard all the pending messages for a particular site.
+proc replclear { machid } {
+ global queuedbs queueenv
+
+ if { [info exists queuedbs($machid)] != 1 } {
+ error "FAIL: replclear: machid $machid not found"
+ }
+
+ set db $queuedbs($machid)
+ set txn [$queueenv txn]
+ set dbc [$db cursor -txn $txn]
+ for { set dbt [$dbc get -rmw -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -rmw -next] } {
+ error_check_good replclear($machid)_del [$dbc del] 0
+ }
+ error_check_good replclear($machid)_dbc_close [$dbc close] 0
+ error_check_good replclear($machid)_txn_commit [$txn commit] 0
+}
+
+# Add a machine to a replication environment.
+proc repladd { machid } {
+ global queueenv queuedbs machids
+
+ if { [info exists queuedbs($machid)] == 1 } {
+ error "FAIL: repladd: machid $machid already exists"
+ }
+
+ set queuedbs($machid) [berkdb open -auto_commit \
+ -env $queueenv -create -recno -renumber repqueue$machid.db]
+ error_check_good repqueue_create [is_valid_db $queuedbs($machid)] TRUE
+
+ lappend machids $machid
+}
+
+# Acquire a handle to work with an existing machine's replication
+# queue. This is for situations where more than one process
+# is working with a message queue. In general, having more than one
+# process handle the queue is wrong. However, in order to test some
+# things, we need two processes (since Tcl doesn't support threads). We
+# go to great pains in the test harness to make sure this works, but we
+# don't let customers do it.
+proc repljoin { machid } {
+ global queueenv queuedbs machids
+
+ set queuedbs($machid) [berkdb open -auto_commit \
+ -env $queueenv repqueue$machid.db]
+ error_check_good repqueue_create [is_valid_db $queuedbs($machid)] TRUE
+
+ lappend machids $machid
+}
+
+# Process a queue of messages, skipping every "skip_interval" entry.
+# We traverse the entire queue, but since we skip some messages, we
+# may end up leaving things in the queue, which should get picked up
+# on a later run.
+proc replprocessqueue { dbenv machid { skip_interval 0 } { hold_electp NONE } \
+ { dupmasterp NONE } { errp NONE } } {
+ global queuedbs queueenv errorCode
+ global perm_response_list
+ global startup_done
+
+ # hold_electp is a call-by-reference variable which lets our caller
+ # know we need to hold an election.
+ if { [string compare $hold_electp NONE] != 0 } {
+ upvar $hold_electp hold_elect
+ }
+ set hold_elect 0
+
+ # dupmasterp is a call-by-reference variable which lets our caller
+ # know we have a duplicate master.
+ if { [string compare $dupmasterp NONE] != 0 } {
+ upvar $dupmasterp dupmaster
+ }
+ set dupmaster 0
+
+ # errp is a call-by-reference variable which lets our caller
+ # know we have gotten an error (that they expect).
+ if { [string compare $errp NONE] != 0 } {
+ upvar $errp errorp
+ }
+ set errorp 0
+
+ set nproced 0
+
+ set txn [$queueenv txn]
+
+ # If we are running separate processes, the second process has
+ # to join an existing message queue.
+ if { [info exists queuedbs($machid)] == 0 } {
+ repljoin $machid
+ }
+
+ set dbc [$queuedbs($machid) cursor -txn $txn]
+
+ error_check_good process_dbc($machid) \
+ [is_valid_cursor $dbc $queuedbs($machid)] TRUE
+
+ for { set dbt [$dbc get -first] } \
+ { [llength $dbt] != 0 } \
+ { } {
+ set data [lindex [lindex $dbt 0] 1]
+ set recno [lindex [lindex $dbt 0] 0]
+
+ # If skip_interval is nonzero, we want to process messages
+ # out of order. We do this in a simple but slimy way--
+ # continue walking with the cursor without processing the
+ # message or deleting it from the queue, but do increment
+ # "nproced". The way this proc is normally used, the
+ # precise value of nproced doesn't matter--we just don't
+ # assume the queues are empty if it's nonzero. Thus,
+ # if we contrive to make sure it's nonzero, we'll always
+ # come back to records we've skipped on a later call
+ # to replprocessqueue. (If there really are no records,
+ # we'll never get here.)
+ #
+ # Skip every skip_interval'th record (and use a remainder other
+ # than zero so that we're guaranteed to really process at least
+ # one record on every call).
+ if { $skip_interval != 0 } {
+ if { $nproced % $skip_interval == 1 } {
+ incr nproced
+ set dbt [$dbc get -next]
+ continue
+ }
+ }
+
+ # We need to remove the current message from the queue,
+ # because we're about to end the transaction and someone
+ # else processing messages might come in and reprocess this
+ # message which would be bad.
+ error_check_good queue_remove [$dbc del] 0
+
+ # We have to play an ugly cursor game here: we currently
+ # hold a lock on the page of messages, but rep_process_message
+ # might need to lock the page with a different cursor in
+ # order to send a response. So save the next recno, close
+ # the cursor, and then reopen and reset the cursor.
+ # If someone else is processing this queue, our entry might
+ # have gone away, and we need to be able to handle that.
+
+ error_check_good dbc_process_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ set ret [catch {$dbenv rep_process_message \
+ [lindex $data 2] [lindex $data 0] [lindex $data 1]} res]
+
+ # Save all ISPERM and NOTPERM responses so we can compare their
+ # LSNs to the LSN in the log. The variable perm_response_list
+ # holds the entire response so we can extract responses and
+ # LSNs as needed.
+ #
+ if { [llength $perm_response_list] != 0 && \
+ ([is_substr $res ISPERM] || [is_substr $res NOTPERM]) } {
+ lappend perm_response_list $res
+ }
+
+ if { $ret != 0 } {
+ if { [string compare $errp NONE] != 0 } {
+ set errorp "$dbenv $machid $res"
+ } else {
+ error "FAIL:[timestamp]\
+ rep_process_message returned $res"
+ }
+ }
+
+ incr nproced
+
+ # Now, re-establish the cursor position. We fetch the
+ # current record number. If there is something there,
+ # that is the record for the next iteration. If there
+ # is nothing there, then we've consumed the last item
+ # in the queue.
+
+ set txn [$queueenv txn]
+ set dbc [$queuedbs($machid) cursor -txn $txn]
+ set dbt [$dbc get -set_range $recno]
+
+ if { $ret == 0 } {
+ set rettype [lindex $res 0]
+ set retval [lindex $res 1]
+ #
+ # Do nothing for 0 and NEWSITE
+ #
+ if { [is_substr $rettype STARTUPDONE] } {
+ set startup_done 1
+ }
+ if { [is_substr $rettype HOLDELECTION] } {
+ set hold_elect 1
+ }
+ if { [is_substr $rettype DUPMASTER] } {
+ set dupmaster "1 $dbenv $machid"
+ }
+ if { [is_substr $rettype NOTPERM] || \
+ [is_substr $rettype ISPERM] } {
+ set lsnfile [lindex $retval 0]
+ set lsnoff [lindex $retval 1]
+ }
+ }
+
+ if { $errorp != 0 } {
+ # Break also on an error, caller wants to handle it.
+ break
+ }
+ if { $hold_elect == 1 } {
+ # Break also on a HOLDELECTION, for the same reason.
+ break
+ }
+ if { $dupmaster == 1 } {
+ # Break also on a DUPMASTER, for the same reason.
+ break
+ }
+
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ # Return the number of messages processed.
+ return $nproced
+}
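+
+# A minimal usage sketch (hypothetical env handle and machine ID), showing
+# the optional call-by-reference flags the proc can set for its caller:
+#
+#	set nproced [replprocessqueue $clientenv 2 0 he dup err]
+#	if { $he } {
+#		# the caller would hold an election here
+#	}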
+
+
+set run_repl_flag "-run_repl"
+
+proc extract_repl_args { args } {
+ global run_repl_flag
+
+ for { set arg [lindex $args [set i 0]] } \
+ { [string length $arg] > 0 } \
+ { set arg [lindex $args [incr i]] } {
+ if { [string compare $arg $run_repl_flag] == 0 } {
+ return [lindex $args [expr $i + 1]]
+ }
+ }
+ return ""
+}
+
+proc delete_repl_args { args } {
+ global run_repl_flag
+
+ set ret {}
+
+ for { set arg [lindex $args [set i 0]] } \
+ { [string length $arg] > 0 } \
+ { set arg [lindex $args [incr i]] } {
+ if { [string compare $arg $run_repl_flag] != 0 } {
+ lappend ret $arg
+ } else {
+ incr i
+ }
+ }
+ return $ret
+}
+
+global elect_serial
+global elections_in_progress
+set elect_serial 0
+
+# Start an election in a sub-process.
+proc start_election \
+ { pfx qdir envstring nsites nvotes pri timeout {err "none"} {crash 0}} {
+ source ./include.tcl
+ global elect_serial elections_in_progress machids
+ global rep_verbose
+
+ set filelist {}
+ set ret [catch {glob $testdir/ELECTION*.$elect_serial} result]
+ if { $ret == 0 } {
+ set filelist [concat $filelist $result]
+ }
+ foreach f $filelist {
+ fileremove -f $f
+ }
+
+ set oid [open $testdir/ELECTION_SOURCE.$elect_serial w]
+
+ puts $oid "source $test_path/test.tcl"
+ puts $oid "set elected_event 0"
+ puts $oid "set elected_env \"NONE\""
+ puts $oid "set is_repchild 1"
+ puts $oid "replsetup $qdir"
+ foreach i $machids { puts $oid "repladd $i" }
+ puts $oid "set env_cmd \{$envstring\}"
+ if { $rep_verbose == 1 } {
+ puts $oid "set dbenv \[eval \$env_cmd -errfile \
+ /dev/stdout -errpfx $pfx \]"
+ } else {
+ puts $oid "set dbenv \[eval \$env_cmd -errfile \
+ $testdir/ELECTION_ERRFILE.$elect_serial -errpfx $pfx \]"
+ }
+ puts $oid "\$dbenv test abort $err"
+ puts $oid "set res \[catch \{\$dbenv rep_elect $nsites \
+ $nvotes $pri $timeout\} ret\]"
+ puts $oid "set r \[open \$testdir/ELECTION_RESULT.$elect_serial w\]"
+ puts $oid "if \{\$res == 0 \} \{"
+ puts $oid "puts \$r \"SUCCESS \$ret\""
+ puts $oid "\} else \{"
+ puts $oid "puts \$r \"ERROR \$ret\""
+ puts $oid "\}"
+ #
+	# This block calls rep_elect a second time with the error cleared.
+ # We don't want to do that if we are simulating a crash.
+ if { $err != "none" && $crash != 1 } {
+ puts $oid "\$dbenv test abort none"
+ puts $oid "set res \[catch \{\$dbenv rep_elect $nsites \
+ $nvotes $pri $timeout\} ret\]"
+ puts $oid "if \{\$res == 0 \} \{"
+ puts $oid "puts \$r \"SUCCESS \$ret\""
+ puts $oid "\} else \{"
+ puts $oid "puts \$r \"ERROR \$ret\""
+ puts $oid "\}"
+ }
+
+ puts $oid "if \{ \$elected_event == 1 \} \{"
+ puts $oid "puts \$r \"ELECTED \$elected_env\""
+ puts $oid "\}"
+
+ puts $oid "close \$r"
+ close $oid
+
+ set t [open "|$tclsh_path >& $testdir/ELECTION_OUTPUT.$elect_serial" w]
+ if { $rep_verbose } {
+ set t [open "|$tclsh_path" w]
+ }
+ puts $t "source ./include.tcl"
+ puts $t "source $testdir/ELECTION_SOURCE.$elect_serial"
+ flush $t
+
+ set elections_in_progress($elect_serial) $t
+ return $elect_serial
+}
+
+#
+# If we are doing elections during upgrade testing, set
+# upgrade to 1. Doing that sets the priority to the
+# test priority in rep_elect, which will simulate a
+# 0-priority but electable site.
+#
+proc setpriority { priority nclients winner {start 0} {upgrade 0} } {
+ global electable_pri
+ upvar $priority pri
+
+ for { set i $start } { $i < [expr $nclients + $start] } { incr i } {
+ if { $i == $winner } {
+ set pri($i) 100
+ } else {
+ if { $upgrade } {
+ set pri($i) $electable_pri
+ } else {
+ set pri($i) 10
+ }
+ }
+ }
+}
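+
+# For example, a hypothetical call for 3 clients (env ids starting at 0)
+# where client 1 should win yields pri(0)=10, pri(1)=100, pri(2)=10:
+#
+#	setpriority pri 3 1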
+
+# run_election has the following arguments:
+# Arrays and lists (passed by name):
+#	ecmd 		Array of the commands for setting up each client env.
+#	celist		List of {client env handle, env id} pairs.
+# errcmd Array of where errors should be forced.
+# priority Array of the priorities of each client env.
+# crash If an error is forced, should we crash or recover?
+# The upvar command takes care of making these arrays available to
+# the procedure.
+#
+# Ordinary variables:
+# qdir Directory where the message queue is located.
+# msg Message prefixed to the output.
+# elector This client calls the first election.
+# nsites Number of sites in the replication group.
+# nvotes Number of votes required to win the election.
+# nclients Number of clients participating in the election.
+# win The expected winner of the election.
+# reopen Should the new master (i.e. winner) be closed
+# and reopened as a client?
+# dbname Name of the underlying database. The caller
+# should send in "NULL" if the database has not
+# yet been created.
+# ignore Should the winner ignore its own election?
+# If ignore is 1, the winner is not made master.
+# timeout_ok We expect that this election will not succeed
+# in electing a new master (perhaps because there
+# already is a master).
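+#
+# A minimal usage sketch (hypothetical arrays and values): client 0 calls
+# the first election in a 3-site group needing 3 votes, client 1 is
+# expected to win, and the winner stays master:
+#
+#	run_election env_cmd cenvlist err_cmd pri crash $qdir \
+#	    "Rep$tnum.c" 0 3 3 3 1 0 test.db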
+
+proc run_election { ecmd celist errcmd priority crsh\
+ qdir msg elector nsites nvotes nclients win reopen\
+ dbname {ignore 0} {timeout_ok 0} } {
+
+ global elect_timeout elect_serial
+ global is_hp_test
+ global is_windows_test
+ global rand_init
+ upvar $ecmd env_cmd
+ upvar $celist cenvlist
+ upvar $errcmd err_cmd
+ upvar $priority pri
+ upvar $crsh crash
+
+ set elect_timeout(default) 15000000
+ # Windows and HP-UX require a longer timeout.
+ if { $is_windows_test == 1 || $is_hp_test == 1 } {
+ set elect_timeout(default) [expr $elect_timeout(default) * 2]
+ }
+
+ set long_timeout $elect_timeout(default)
+ #
+ # Initialize tries based on the default timeout.
+ # We use tries to loop looking for messages because
+ # as sites are sleeping waiting for their timeout
+ # to expire we need to keep checking for messages.
+ #
+ set tries [expr [expr $long_timeout * 4] / 1000000]
+ #
+ # Retry indicates whether the test should retry the election
+ # if it gets a timeout. This is primarily used for the
+	# varied timeout election test because we expect short-timeout
+	# sites to time out when interacting with long timeouts, and those
+	# sites then need to call elections again.
+ #
+ set retry 0
+ foreach pair $cenvlist {
+ set id [lindex $pair 1]
+ set i [expr $id - 2]
+ set elect_pipe($i) INVALID
+ #
+ # Array get should return us a list of 1 element:
+ # { {$i timeout_value} }
+ # If that doesn't exist, use the default.
+ #
+ set this_timeout [array get elect_timeout $i]
+ if { [llength $this_timeout] } {
+ set e_timeout($i) [lindex $this_timeout 1]
+ #
+ # Set number of tries based on the biggest
+ # timeout we see in this group if using
+ # varied timeouts.
+ #
+ set retry 1
+ if { $e_timeout($i) > $long_timeout } {
+ set long_timeout $e_timeout($i)
+ set tries [expr $long_timeout / 1000000]
+ }
+ } else {
+ set e_timeout($i) $elect_timeout(default)
+ }
+ replclear $id
+ }
+
+ #
+ # XXX
+ # We need to somehow check for the warning if nvotes is not
+ # a majority. Problem is that warning will go into the child
+ # process' output. Furthermore, we need a mechanism that can
+ # handle both sending the output to a file and sending it to
+ # /dev/stderr when debugging without failing the
+ # error_check_good check.
+ #
+ puts "\t\t$msg.1: Election with nsites=$nsites,\
+ nvotes=$nvotes, nclients=$nclients"
+ puts "\t\t$msg.2: First elector is $elector,\
+ expected winner is $win (eid [expr $win + 2])"
+ incr elect_serial
+ set pfx "CHILD$elector.$elect_serial"
+ set elect_pipe($elector) [start_election \
+ $pfx $qdir $env_cmd($elector) $nsites $nvotes $pri($elector) \
+ $e_timeout($elector) $err_cmd($elector) $crash($elector)]
+ tclsleep 2
+
+ set got_newmaster 0
+ set max_retry $tries
+
+ # If we're simulating a crash, skip the while loop and
+ # just give the initial election a chance to complete.
+ set crashing 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ if { $crash($i) == 1 } {
+ set crashing 1
+ }
+ }
+
+ global elected_event
+ global elected_env
+ set elected_event 0
+ set c_elected_event 0
+ set elected_env "NONE"
+
+ set orig_tries $tries
+ if { $crashing == 1 } {
+ tclsleep 10
+ } else {
+ set retry_cnt 0
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set winning_envid -1
+ set c_winning_envid -1
+
+ foreach pair $cenvlist {
+ set he 0
+ set unavail 0
+ set envid [lindex $pair 1]
+ set i [expr $envid - 2]
+ set clientenv($i) [lindex $pair 0]
+
+ # If the "elected" event is received by the
+ # child process, the env set up in that child
+ # is the elected env.
+ set child_done [check_election $elect_pipe($i)\
+ unavail c_elected_event c_elected_env]
+ if { $c_elected_event != 0 } {
+ set elected_event 1
+ set c_winning_envid $envid
+ set c_elected_event 0
+ }
+
+ incr nproced [replprocessqueue \
+ $clientenv($i) $envid 0 he]
+# puts "Tries $tries:\
+# Processed queue for client $i, $nproced msgs he $he unavail $unavail"
+
+ # Check for completed election. If it's the
+ # first time we've noticed it, deal with it.
+ if { $elected_event == 1 && \
+ $got_newmaster == 0 } {
+ set got_newmaster 1
+
+ # Find env id of winner.
+ if { $c_winning_envid != -1 } {
+ set winning_envid \
+ $c_winning_envid
+ set c_winning_envid -1
+ } else {
+ foreach pair $cenvlist {
+ if { [lindex $pair 0]\
+ == $elected_env } {
+ set winning_envid \
+ [lindex $pair 1]
+ break
+ }
+ }
+ }
+
+ # Make sure it's the expected winner.
+ error_check_good right_winner \
+ $winning_envid [expr $win + 2]
+
+ # Reconfigure winning env as master.
+ if { $ignore == 0 } {
+ $clientenv($i) errpfx \
+ NEWMASTER
+ error_check_good \
+ make_master($i) \
+ [$clientenv($i) \
+ rep_start -master] 0
+
+ # Don't hold another election
+ # yet if we are setting up a
+ # new master. This could
+ # cause the new master to
+ # declare itself a client
+ # during internal init.
+ set he 0
+ }
+
+ # Occasionally force new log records
+ # to be written, unless the database
+ # has not yet been created.
+ set write [berkdb random_int 1 10]
+ if { $write == 1 && $dbname != "NULL" } {
+ set db [eval berkdb_open_noerr \
+ -env $clientenv($i) \
+ -auto_commit $dbname]
+ error_check_good dbopen \
+ [is_valid_db $db] TRUE
+ error_check_good dbclose \
+ [$db close] 0
+ }
+ }
+
+ # If the previous election failed with a
+ # timeout and we need to retry because we
+ # are testing varying site timeouts, force
+ # a hold election to start a new one.
+ if { $unavail && $retry && $retry_cnt < $max_retry} {
+ incr retry_cnt
+ puts "\t\t$msg.2.b: Client $i timed\
+ out. Retry $retry_cnt\
+ of max $max_retry"
+ set he 1
+ set tries $orig_tries
+ }
+ if { $he == 1 && $got_newmaster == 0 } {
+ #
+ # Only close down the election pipe if the
+ # previously created one is done and
+ # waiting for new commands, otherwise
+ # if we try to close it while it's in
+ # progress we hang this main tclsh.
+ #
+ if { $elect_pipe($i) != "INVALID" && \
+ $child_done == 1 } {
+ close_election $elect_pipe($i)
+ set elect_pipe($i) "INVALID"
+ }
+# puts "Starting election on client $i"
+ if { $elect_pipe($i) == "INVALID" } {
+ incr elect_serial
+ set pfx "CHILD$i.$elect_serial"
+ set elect_pipe($i) [start_election \
+ $pfx $qdir \
+ $env_cmd($i) $nsites \
+ $nvotes $pri($i) $e_timeout($i)]
+ set got_hold_elect($i) 1
+ }
+ }
+ }
+
+ # We need to wait around to make doubly sure that the
+ # election has finished...
+ if { $nproced == 0 } {
+ incr tries -1
+ #
+ # If we have a newmaster already, set tries
+ # down to just allow straggling messages to
+ # be processed. Tries could be a very large
+ # number if we have long timeouts.
+ #
+ if { $got_newmaster != 0 && $tries > 10 } {
+ set tries 10
+ }
+ if { $tries == 0 } {
+ break
+ } else {
+ tclsleep 1
+ }
+ } else {
+ set tries $tries
+ }
+ }
+
+ # If we did get a new master, its identity was checked
+ # at that time. But we still have to make sure that we
+ # didn't just time out.
+
+ if { $got_newmaster == 0 && $timeout_ok == 0 } {
+ error "FAIL: Did not elect new master."
+ }
+ }
+ cleanup_elections
+
+ #
+ # Make sure we've really processed all the post-election
+ # sync-up messages. If we're simulating a crash, don't process
+ # any more messages.
+ #
+ if { $crashing == 0 } {
+ process_msgs $cenvlist
+ }
+
+ if { $reopen == 1 } {
+ puts "\t\t$msg.3: Closing new master and reopening as client"
+ error_check_good log_flush [$clientenv($win) log_flush] 0
+ error_check_good newmaster_close [$clientenv($win) close] 0
+
+ set clientenv($win) [eval $env_cmd($win)]
+ error_check_good cl($win) [is_valid_env $clientenv($win)] TRUE
+ set newelector "$clientenv($win) [expr $win + 2]"
+ set cenvlist [lreplace $cenvlist $win $win $newelector]
+ if { $crashing == 0 } {
+ process_msgs $cenvlist
+ }
+ }
+}
+
+proc check_election { id unavailp elected_eventp elected_envp } {
+ source ./include.tcl
+
+ if { $id == "INVALID" } {
+ return 0
+ }
+ upvar $unavailp unavail
+ upvar $elected_eventp elected_event
+ upvar $elected_envp elected_env
+
+ set unavail 0
+ set elected_event 0
+ set elected_env "NONE"
+
+ set res [catch {open $testdir/ELECTION_RESULT.$id} nmid]
+ if { $res != 0 } {
+ return 0
+ }
+ while { [gets $nmid val] != -1 } {
+# puts "result $id: $val"
+ set str [lindex $val 0]
+ if { [is_substr $val UNAVAIL] } {
+ set unavail 1
+ }
+ if { [is_substr $val ELECTED] } {
+ set elected_event 1
+ set elected_env [lindex $val 1]
+ }
+ }
+ close $nmid
+ return 1
+}
+
+proc close_election { i } {
+ global elections_in_progress
+ global noenv_messaging
+ global qtestdir
+
+ if { $noenv_messaging == 1 } {
+ set testdir $qtestdir
+ }
+
+ set t $elections_in_progress($i)
+ puts $t "replclose \$testdir/MSGQUEUEDIR"
+ puts $t "\$dbenv close"
+ close $t
+ unset elections_in_progress($i)
+}
+
+proc cleanup_elections { } {
+ global elect_serial elections_in_progress
+
+ for { set i 0 } { $i <= $elect_serial } { incr i } {
+ if { [info exists elections_in_progress($i)] != 0 } {
+ close_election $i
+ }
+ }
+
+ set elect_serial 0
+}
+
+#
+# This is essentially a copy of test001, but it only does the put/get
+# loop AND it takes an already-opened db handle.
+#
+proc rep_test { method env repdb {nentries 10000} \
+ {start 0} {skip 0} {needpad 0} args } {
+
+ source ./include.tcl
+ global databases_in_memory
+
+ #
+ # Open the db if one isn't given. Close before exit.
+ #
+ if { $repdb == "NULL" } {
+ if { $databases_in_memory == 1 } {
+ set testfile { "" "test.db" }
+ } else {
+ set testfile "test.db"
+ }
+ set largs [convert_args $method $args]
+ set omethod [convert_method $method]
+ set db [eval {berkdb_open_noerr} -env $env -auto_commit\
+ -create -mode 0644 $omethod $largs $testfile]
+ error_check_good reptest_db [is_valid_db $db] TRUE
+ } else {
+ set db $repdb
+ }
+
+ puts "\t\tRep_test: $method $nentries key/data pairs starting at $start"
+ set did [open $dict]
+
+ # The "start" variable determines the record number to start
+ # with, if we're using record numbers. The "skip" variable
+ # determines which dictionary entry to start with. In normal
+ # use, skip is equal to start.
+
+ if { $skip != 0 } {
+ for { set count 0 } { $count < $skip } { incr count } {
+ gets $did str
+ }
+ }
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+ puts "\t\tRep_test.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+
+ # Checkpoint 10 times during the run, but not more
+ # frequently than every 5 entries.
+ set checkfreq [expr $nentries / 10]
+
+ # Abort occasionally during the run.
+ set abortfreq [expr $nentries / 15]
+
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1 + $start]
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+ #
+ # We want to make sure we send in exactly the same
+ # length data so that LSNs match up for some tests
+ # in replication (rep021).
+ #
+ if { [is_fixed_length $method] == 1 && $needpad } {
+ #
+ # Make it something visible and obvious, 'A'.
+ #
+ set p 65
+ set str [make_fixed_length $method $str $p]
+ set kvals($key) $str
+ }
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ if { $checkfreq < 5 } {
+ set checkfreq 5
+ }
+ if { $abortfreq < 3 } {
+ set abortfreq 3
+ }
+ #
+ # Do a few aborted transactions to test that
+ # aborts don't get processed on clients and the
+ # master handles them properly. Just abort
+ # trying to delete the key we just added.
+ #
+ if { $count % $abortfreq == 0 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set ret [$db del -txn $t $key]
+ error_check_good txn [$t abort] 0
+ }
+ if { $count % $checkfreq == 0 } {
+ error_check_good txn_checkpoint($count) \
+ [$env txn_checkpoint] 0
+ }
+ incr count
+ }
+ close $did
+ if { $repdb == "NULL" } {
+ error_check_good rep_close [$db close] 0
+ }
+}
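+
+# For example (a hypothetical sketch), running rep_test against an
+# already-open database handle so it neither opens nor closes the db:
+#
+#	set db [eval {berkdb_open_noerr} -env $masterenv -auto_commit \
+#	    -create -mode 0644 -btree test.db]
+#	eval rep_test btree $masterenv $db 100 0 0 0
+#	error_check_good db_close [$db close] 0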
+
+#
+# This is essentially a copy of rep_test, but it only does the put/get
+# loop in a long running txn to an open db. We use it for bulk testing
+# because we want to fill the bulk buffer somewhat before sending it out.
+# The bulk buffer is transmitted on every commit.
+#
+proc rep_test_bulk { method env repdb {nentries 10000} \
+ {start 0} {skip 0} {useoverflow 0} args } {
+ source ./include.tcl
+
+ global overflowword1
+ global overflowword2
+ global databases_in_memory
+
+ if { [is_fixed_length $method] && $useoverflow == 1 } {
+ puts "Skipping overflow for fixed length method $method"
+ return
+ }
+ #
+ # Open the db if one isn't given. Close before exit.
+ #
+ if { $repdb == "NULL" } {
+ if { $databases_in_memory == 1 } {
+ set testfile { "" "test.db" }
+ } else {
+ set testfile "test.db"
+ }
+ set largs [convert_args $method $args]
+ set omethod [convert_method $method]
+ set db [eval {berkdb_open_noerr -env $env -auto_commit -create \
+ -mode 0644} $largs $omethod $testfile]
+ error_check_good reptest_db [is_valid_db $db] TRUE
+ } else {
+ set db $repdb
+ }
+
+ #
+	# An env handle is always supplied here, so testfile is just the
+	# database name (or an in-memory name list when databases_in_memory
+	# is set).
+	#
+ puts \
+"\t\tRep_test_bulk: $method $nentries key/data pairs starting at $start"
+ set did [open $dict]
+
+ # The "start" variable determines the record number to start
+ # with, if we're using record numbers. The "skip" variable
+ # determines which dictionary entry to start with. In normal
+ # use, skip is equal to start.
+
+ if { $skip != 0 } {
+ for { set count 0 } { $count < $skip } { incr count } {
+ gets $did str
+ }
+ }
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+ puts "\t\tRep_test_bulk.a: put/get loop in 1 txn"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set pid [pid]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1 + $start]
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
+ set kvals($key) [pad_data $method $str]
+ if { [is_fixed_length $method] == 0 } {
+ set str [repeat $str 100]
+ }
+ } else {
+ set key $str.$pid
+ set str [repeat $str 100]
+ }
+ #
+		# For use in the overflow test.
+ #
+ if { $useoverflow == 0 } {
+ if { [string length $overflowword1] < \
+ [string length $str] } {
+ set overflowword2 $overflowword1
+ set overflowword1 $str
+ }
+ } else {
+ if { $count == 0 } {
+ set len [string length $overflowword1]
+ set word $overflowword1
+ } else {
+ set len [string length $overflowword2]
+ set word $overflowword1
+ }
+ set rpt [expr 1024 * 1024 / $len]
+ incr rpt
+ set str [repeat $word $rpt]
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ incr count
+ }
+ error_check_good txn [$t commit] 0
+ error_check_good txn_checkpoint [$env txn_checkpoint] 0
+ close $did
+ if { $repdb == "NULL" } {
+ error_check_good rep_close [$db close] 0
+ }
+}
+
+proc rep_test_upg { method env repdb {nentries 10000} \
+ {start 0} {skip 0} {needpad 0} {inmem 0} args } {
+
+ source ./include.tcl
+
+ #
+ # Open the db if one isn't given. Close before exit.
+ #
+ if { $repdb == "NULL" } {
+ if { $inmem == 1 } {
+ set testfile { "" "test.db" }
+ } else {
+ set testfile "test.db"
+ }
+ set largs [convert_args $method $args]
+ set omethod [convert_method $method]
+ set db [eval {berkdb_open_noerr} -env $env -auto_commit\
+ -create -mode 0644 $omethod $largs $testfile]
+ error_check_good reptest_db [is_valid_db $db] TRUE
+ } else {
+ set db $repdb
+ }
+
+ set pid [pid]
+ puts "\t\tRep_test_upg($pid): $method $nentries key/data pairs starting at $start"
+ set did [open $dict]
+
+ # The "start" variable determines the record number to start
+ # with, if we're using record numbers. The "skip" variable
+ # determines which dictionary entry to start with. In normal
+ # use, skip is equal to start.
+
+ if { $skip != 0 } {
+ for { set count 0 } { $count < $skip } { incr count } {
+ gets $did str
+ }
+ }
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+	puts "\t\tRep_test_upg.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+
+ # Checkpoint 10 times during the run, but not more
+ # frequently than every 5 entries.
+ set checkfreq [expr $nentries / 10]
+
+ # Abort occasionally during the run.
+ set abortfreq [expr $nentries / 15]
+
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1 + $start]
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
+ set kvals($key) [pad_data $method $str]
+ } else {
+ #
+ # With upgrade test, we run the same test several
+ # times with the same database. We want to have
+ # some overwritten records and some new records.
+ # Therefore append our pid to half the keys.
+ #
+ if { $count % 2 } {
+ set key $str.$pid
+ } else {
+ set key $str
+ }
+ set str [reverse $str]
+ }
+ #
+ # We want to make sure we send in exactly the same
+ # length data so that LSNs match up for some tests
+ # in replication (rep021).
+ #
+ if { [is_fixed_length $method] == 1 && $needpad } {
+ #
+ # Make it something visible and obvious, 'A'.
+ #
+ set p 65
+ set str [make_fixed_length $method $str $p]
+ set kvals($key) $str
+ }
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+# puts "rep_test_upg: put $count of $nentries: key $key, data $str"
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ if { $checkfreq < 5 } {
+ set checkfreq 5
+ }
+ if { $abortfreq < 3 } {
+ set abortfreq 3
+ }
+ #
+ # Do a few aborted transactions to test that
+ # aborts don't get processed on clients and the
+ # master handles them properly. Just abort
+ # trying to delete the key we just added.
+ #
+ if { $count % $abortfreq == 0 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set ret [$db del -txn $t $key]
+ error_check_good txn [$t abort] 0
+ }
+ if { $count % $checkfreq == 0 } {
+ error_check_good txn_checkpoint($count) \
+ [$env txn_checkpoint] 0
+ }
+ incr count
+ }
+ close $did
+ if { $repdb == "NULL" } {
+ error_check_good rep_close [$db close] 0
+ }
+}
+
+proc rep_test_upg.check { key data } {
+ #
+ # If the key has the pid attached, strip it off before checking.
+ # If the key does not have the pid attached, then it is a recno
+ # and we're done.
+ #
+ set i [string first . $key]
+ if { $i != -1 } {
+ set key [string replace $key $i end]
+ }
+ error_check_good "key/data mismatch" $data [reverse $key]
+}
+
+proc rep_test_upg.recno.check { key data } {
+ #
+ # If we're a recno database we better not have a pid in the key.
+ # Otherwise we're done.
+ #
+ set i [string first . $key]
+ error_check_good pid $i -1
+}
+
+#
+# This is the basis for a number of simple repmgr test cases. It creates
+# an appointed master and two clients, calls rep_test to process some records
+# and verifies the resulting databases. The following parameters control
+# runtime options:
+# niter - number of records to process
+# inmemdb - put databases in-memory (0, 1)
+# inmemlog - put logs in-memory (0, 1)
+# peer - make the second client a peer of the first client (0, 1)
+# bulk - use bulk processing (0, 1)
+# inmemrep - put replication files in-memory (0, 1)
+#
+proc basic_repmgr_test { method niter tnum inmemdb inmemlog peer bulk \
+ inmemrep largs } {
+ global testdir
+ global rep_verbose
+ global verbose_type
+ global overflowword1
+ global overflowword2
+ global databases_in_memory
+ set overflowword1 "0"
+ set overflowword2 "0"
+ set nsites 3
+
+ # Set databases_in_memory for this test, preserving original value.
+ if { $inmemdb } {
+ set restore_dbinmem $databases_in_memory
+ set databases_in_memory 1
+ }
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ # In-memory logs require a large log buffer, and cannot
+ # be used with -txn nosync. Adjust the args.
+ if { $inmemlog } {
+ set logtype "in-memory"
+ } else {
+ set logtype "on-disk"
+ }
+ set logargs [adjust_logargs $logtype]
+ set txnargs [adjust_txnargs $logtype]
+
+ # Determine in-memory replication argument for environments.
+ if { $inmemrep } {
+ set repmemarg "-rep_inmem_files "
+ } else {
+ set repmemarg ""
+ }
+
+ # Use different connection retry timeout values to handle any
+ # collisions from starting sites at the same time by retrying
+ # at different times.
+
+ # Open a master.
+ puts "\tRepmgr$tnum.a: Start an appointed master."
+ set ma_envcmd "berkdb_env_noerr -create $logargs $verbargs \
+ -errpfx MASTER -home $masterdir $txnargs -rep -thread \
+ -lock_max_locks 10000 -lock_max_objects 10000 $repmemarg"
+ set masterenv [eval $ma_envcmd]
+ $masterenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -start master
+
+ # Open first client
+ puts "\tRepmgr$tnum.b: Start first client."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs $logargs \
+ -errpfx CLIENT -home $clientdir $txnargs -rep -thread \
+ -lock_max_locks 10000 -lock_max_objects 10000 $repmemarg"
+ set clientenv [eval $cl_envcmd]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 2]] \
+ -start client
+ await_startup_done $clientenv
+
+ # Open second client
+ puts "\tRepmgr$tnum.c: Start second client."
+ set cl2_envcmd "berkdb_env_noerr -create $verbargs $logargs \
+ -errpfx CLIENT2 -home $clientdir2 $txnargs -rep -thread \
+ -lock_max_locks 10000 -lock_max_objects 10000 $repmemarg"
+ set clientenv2 [eval $cl2_envcmd]
+ if { $peer } {
+ $clientenv2 repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 5000000} \
+ -local [list localhost [lindex $ports 2]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 1] peer] \
+ -start client
+ } else {
+ $clientenv2 repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 5000000} \
+ -local [list localhost [lindex $ports 2]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 1]] \
+ -start client
+ }
+ await_startup_done $clientenv2
+
+ #
+	# Use of -ack all guarantees replication is complete before the
+	# repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.d: Run some transactions at master."
+ if { $bulk } {
+ # Turn on bulk processing on master.
+ error_check_good set_bulk [$masterenv rep_config {bulk on}] 0
+
+ eval rep_test_bulk $method $masterenv NULL $niter 0 0 0 $largs
+
+ # Must turn off bulk because some configs (debug_rop/wop)
+ # generate log records when verifying databases.
+ error_check_good set_bulk [$masterenv rep_config {bulk off}] 0
+ } else {
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+ }
+
+ puts "\tRepmgr$tnum.e: Verifying client database contents."
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+ rep_verify $masterdir $masterenv $clientdir2 $clientenv2 1 1 1
+
+ # For in-memory replication, verify replication files not there.
+ if { $inmemrep } {
+ puts "\tRepmgr$tnum.f: Verify no replication files on disk."
+ no_rep_files_on_disk $masterdir
+ no_rep_files_on_disk $clientdir
+ no_rep_files_on_disk $clientdir2
+ }
+
+ # Restore original databases_in_memory value.
+ if { $inmemdb } {
+ set databases_in_memory $restore_dbinmem
+ }
+
+ error_check_good client2_close [$clientenv2 close] 0
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
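+
+# A hypothetical invocation sketch: 100 iterations, on-disk databases and
+# logs, the second client peered to the first, bulk off, on-disk rep files:
+#
+#	basic_repmgr_test btree 100 "001" 0 0 1 0 0 $largs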
+
+#
+# This is the basis for simple repmgr election test cases. It opens three
+# clients of different priorities and makes sure repmgr elects the
+# expected master. Then it shuts the master down and makes sure repmgr
+# elects the expected remaining client as the new master. Finally it makes
+# sure the former master can rejoin as a client. The following parameters
+# control runtime options:
+# niter - number of records to process
+# inmemrep - put replication files in-memory (0, 1)
+#
+proc basic_repmgr_election_test { method niter tnum inmemrep largs } {
+ global rep_verbose
+ global testdir
+ global verbose_type
+ set nsites 3
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+ set clientdir3 $testdir/CLIENTDIR3
+
+ file mkdir $clientdir
+ file mkdir $clientdir2
+ file mkdir $clientdir3
+
+ # Determine in-memory replication argument for environments.
+ if { $inmemrep } {
+ set repmemarg "-rep_inmem_files "
+ } else {
+ set repmemarg ""
+ }
+
+ # Use different connection retry timeout values to handle any
+ # collisions from starting sites at the same time by retrying
+ # at different times.
+
+ puts "\tRepmgr$tnum.a: Start three clients."
+
+ # Open first client
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread $repmemarg"
+ set clientenv [eval $cl_envcmd]
+ $clientenv repmgr -ack all -nsites $nsites -pri 100 \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 2]] \
+ -start elect
+
+ # Open second client
+ set cl2_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT2 -home $clientdir2 -txn -rep -thread $repmemarg"
+ set clientenv2 [eval $cl2_envcmd]
+ $clientenv2 repmgr -ack all -nsites $nsites -pri 30 \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 2]] \
+ -start elect
+
+ # Open third client
+ set cl3_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT3 -home $clientdir3 -txn -rep -thread $repmemarg"
+ set clientenv3 [eval $cl3_envcmd]
+ $clientenv3 repmgr -ack all -nsites $nsites -pri 20 \
+ -timeout {conn_retry 5000000} \
+ -local [list localhost [lindex $ports 2]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 1]] \
+ -start elect
+
+ puts "\tRepmgr$tnum.b: Elect first client master."
+ await_expected_master $clientenv
+ set masterenv $clientenv
+ set masterdir $clientdir
+ await_startup_done $clientenv2
+ await_startup_done $clientenv3
+
+ #
+	# Use of -ack all guarantees that replication is complete before the
+	# repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.c: Run some transactions at master."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+
+ puts "\tRepmgr$tnum.d: Verify client database contents."
+ rep_verify $masterdir $masterenv $clientdir2 $clientenv2 1 1 1
+ rep_verify $masterdir $masterenv $clientdir3 $clientenv3 1 1 1
+
+ puts "\tRepmgr$tnum.e: Shut down master, elect second client master."
+ error_check_good client_close [$clientenv close] 0
+ await_expected_master $clientenv2
+ set masterenv $clientenv2
+ await_startup_done $clientenv3
+
+ puts "\tRepmgr$tnum.f: Restart former master as client."
+ # Open -recover to clear env region, including startup_done value.
+ set clientenv [eval $cl_envcmd -recover]
+ $clientenv repmgr -ack all -nsites $nsites -pri 100 \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 2]] \
+ -start client
+ await_startup_done $clientenv
+
+ puts "\tRepmgr$tnum.g: Run some transactions at new master."
+ eval rep_test $method $masterenv NULL $niter $niter 0 0 $largs
+
+ puts "\tRepmgr$tnum.h: Verify client database contents."
+ set masterdir $clientdir2
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+ rep_verify $masterdir $masterenv $clientdir3 $clientenv3 1 1 1
+
+ # For in-memory replication, verify replication files not there.
+ if { $inmemrep } {
+ puts "\tRepmgr$tnum.i: Verify no replication files on disk."
+ no_rep_files_on_disk $clientdir
+ no_rep_files_on_disk $clientdir2
+ no_rep_files_on_disk $clientdir3
+ }
+
+ error_check_good client3_close [$clientenv3 close] 0
+ error_check_good client_close [$clientenv close] 0
+ error_check_good client2_close [$clientenv2 close] 0
+}
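+#
+# A minimal usage sketch for the proc above (the argument values are
+# hypothetical, not taken from an actual test case): run the election
+# test with btree, 100 records per pass, test number 100, and on-disk
+# replication files.
+#
+#	basic_repmgr_election_test btree 100 100 0 ""
+#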
+
+#
+# This is the basis for simple repmgr internal init test cases. It starts
+# an appointed master and two clients, running transactions before each
+# additional site is started. Then it verifies that all expected
+# transactions are replicated. The following parameters control
+# runtime options:
+# niter - number of records to process
+# inmemrep - put replication files in-memory (0, 1)
+#
+proc basic_repmgr_init_test { method niter tnum inmemrep largs } {
+ global rep_verbose
+ global testdir
+ global verbose_type
+ set nsites 3
+
+ set verbargs ""
+ if { $rep_verbose == 1 } {
+ set verbargs " -verbose {$verbose_type on} "
+ }
+
+ env_cleanup $testdir
+ set ports [available_ports $nsites]
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+ set clientdir2 $testdir/CLIENTDIR2
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+ file mkdir $clientdir2
+
+ # Determine in-memory replication argument for environments.
+ if { $inmemrep } {
+ set repmemarg "-rep_inmem_files "
+ } else {
+ set repmemarg ""
+ }
+
+ # Use different connection retry timeout values to handle any
+ # collisions from starting sites at the same time by retrying
+ # at different times.
+
+ # Open a master.
+ puts "\tRepmgr$tnum.a: Start a master."
+ set ma_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx MASTER -home $masterdir -txn -rep -thread $repmemarg"
+ set masterenv [eval $ma_envcmd]
+ $masterenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 20000000} \
+ -local [list localhost [lindex $ports 0]] \
+ -start master
+
+ puts "\tRepmgr$tnum.b: Run some transactions at master."
+ eval rep_test $method $masterenv NULL $niter 0 0 0 $largs
+
+ # Open first client
+ puts "\tRepmgr$tnum.c: Start first client."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT -home $clientdir -txn -rep -thread $repmemarg"
+ set clientenv [eval $cl_envcmd]
+ $clientenv repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 10000000} \
+ -local [list localhost [lindex $ports 1]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 2]] \
+ -start client
+ await_startup_done $clientenv
+
+ #
+	# Use of -ack all guarantees that replication is complete before the
+	# repmgr send function returns and rep_test finishes.
+ #
+ puts "\tRepmgr$tnum.d: Run some more transactions at master."
+ eval rep_test $method $masterenv NULL $niter $niter 0 0 $largs
+
+ # Open second client
+ puts "\tRepmgr$tnum.e: Start second client."
+ set cl_envcmd "berkdb_env_noerr -create $verbargs \
+ -errpfx CLIENT2 -home $clientdir2 -txn -rep -thread $repmemarg"
+ set clientenv2 [eval $cl_envcmd]
+ $clientenv2 repmgr -ack all -nsites $nsites \
+ -timeout {conn_retry 5000000} \
+ -local [list localhost [lindex $ports 2]] \
+ -remote [list localhost [lindex $ports 0]] \
+ -remote [list localhost [lindex $ports 1]] \
+ -start client
+ await_startup_done $clientenv2
+
+ puts "\tRepmgr$tnum.f: Verifying client database contents."
+ rep_verify $masterdir $masterenv $clientdir $clientenv 1 1 1
+ rep_verify $masterdir $masterenv $clientdir2 $clientenv2 1 1 1
+
+ # For in-memory replication, verify replication files not there.
+ if { $inmemrep } {
+ puts "\tRepmgr$tnum.g: Verify no replication files on disk."
+ no_rep_files_on_disk $masterdir
+ no_rep_files_on_disk $clientdir
+ no_rep_files_on_disk $clientdir2
+ }
+
+ error_check_good client2_close [$clientenv2 close] 0
+ error_check_good client_close [$clientenv close] 0
+ error_check_good masterenv_close [$masterenv close] 0
+}
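+#
+# A minimal usage sketch for the proc above (hypothetical argument values):
+# run the internal init test with btree, 100 records per pass, test
+# number 100, and in-memory replication files.
+#
+#	basic_repmgr_init_test btree 100 100 1 ""
+#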
+
+#
+# Verify that no replication files are present in a given directory.
+# This checks for the gen, egen, internal init, temp db and page db
+# files.
+#
+proc no_rep_files_on_disk { dir } {
+ error_check_good nogen [file exists "$dir/__db.rep.gen"] 0
+ error_check_good noegen [file exists "$dir/__db.rep.egen"] 0
+ error_check_good noinit [file exists "$dir/__db.rep.init"] 0
+ error_check_good notmpdb [file exists "$dir/__db.rep.db"] 0
+ error_check_good nopgdb [file exists "$dir/__db.reppg.db"] 0
+}
+
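+#
+# Process replication messages among the environments in elist until no
+# more are pending. Each element of elist is an {env envid} pair. The
+# optional arguments request tracking of permanent-message responses, let
+# the caller capture dupmaster and error returns instead of failing the
+# test, and adjust the looping behavior for the upgrade tests.
+#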
+proc process_msgs { elist {perm_response 0} {dupp NONE} {errp NONE} \
+ {upg 0} } {
+ if { $perm_response == 1 } {
+ global perm_response_list
+ set perm_response_list {{}}
+ }
+
+ if { [string compare $dupp NONE] != 0 } {
+ upvar $dupp dupmaster
+ set dupmaster 0
+ } else {
+ set dupmaster NONE
+ }
+
+ if { [string compare $errp NONE] != 0 } {
+ upvar $errp errorp
+ set errorp 0
+ set var_name errorp
+ } else {
+ set errorp NONE
+ set var_name NONE
+ }
+
+ set upgcount 0
+ while { 1 } {
+ set nproced 0
+ incr nproced [proc_msgs_once $elist dupmaster $var_name]
+ #
+		# If we're running the upgrade test, we are running only
+		# our own env, so we need to loop a bit to allow the other
+		# upgrade procs to run and reply to our messages.
+ #
+ if { $upg == 1 && $upgcount < 10 } {
+ tclsleep 2
+ incr upgcount
+ continue
+ }
+ if { $nproced == 0 } {
+ break
+ } else {
+ set upgcount 0
+ }
+ }
+}
+
+
+proc proc_msgs_once { elist {dupp NONE} {errp NONE} } {
+ global noenv_messaging
+
+ if { [string compare $dupp NONE] != 0 } {
+ upvar $dupp dupmaster
+ set dupmaster 0
+ } else {
+ set dupmaster NONE
+ }
+
+ if { [string compare $errp NONE] != 0 } {
+ upvar $errp errorp
+ set errorp 0
+ set var_name errorp
+ } else {
+ set errorp NONE
+ set var_name NONE
+ }
+
+ set nproced 0
+ foreach pair $elist {
+ set envname [lindex $pair 0]
+ set envid [lindex $pair 1]
+ #
+ # If we need to send in all the other args
+# puts "Call replpq with on $envid"
+ if { $noenv_messaging } {
+ incr nproced [replprocessqueue_noenv $envname $envid \
+ 0 NONE dupmaster $var_name]
+ } else {
+ incr nproced [replprocessqueue $envname $envid \
+ 0 NONE dupmaster $var_name]
+ }
+ #
+ # If the user is expecting to handle an error and we get
+ # one, return the error immediately.
+ #
+ if { $dupmaster != 0 && $dupmaster != "NONE" } {
+ return 0
+ }
+ if { $errorp != 0 && $errorp != "NONE" } {
+# puts "Returning due to error $errorp"
+ return 0
+ }
+ }
+ return $nproced
+}
+
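+#
+# Compare a master environment/directory with a client after replication.
+# Optional arguments: compare_shared_portion restricts the log comparison
+# to the log files both sites still have; match says whether the contents
+# are expected to be identical; logcompare says whether to compare logs in
+# addition to databases; dbname and datadir select the database to check.
+#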
+proc rep_verify { masterdir masterenv clientdir clientenv \
+ {compare_shared_portion 0} {match 1} {logcompare 1} \
+ {dbname "test.db"} {datadir ""} } {
+ global util_path
+ global encrypt
+ global passwd
+ global databases_in_memory
+ global repfiles_in_memory
+ global env_private
+
+	# Whether a named database is in-memory or on-disk, only the
+	# name itself is passed in. Here we do the syntax adjustment
+ # from "test.db" to { "" "test.db" } for in-memory databases.
+ #
+ if { $databases_in_memory && $dbname != "NULL" } {
+ set dbname " {} $dbname "
+ }
+
+ # Check locations of dbs, repfiles, region files.
+ if { $dbname != "NULL" } {
+ check_db_location $masterenv $dbname $datadir
+ check_db_location $clientenv $dbname $datadir
+ }
+
+ if { $repfiles_in_memory } {
+ no_rep_files_on_disk $masterdir
+ no_rep_files_on_disk $clientdir
+ }
+ if { $env_private } {
+ no_region_files_on_disk $masterdir
+ no_region_files_on_disk $clientdir
+ }
+
+ # The logcompare flag indicates whether to compare logs.
+ # Sometimes we run a test where rep_verify is run twice with
+ # no intervening processing of messages. If that test is
+ # on a build with debug_rop enabled, the master's log is
+ # altered by the first rep_verify, and the second rep_verify
+ # will fail.
+ # To avoid this, skip the log comparison on the second rep_verify
+ # by specifying logcompare == 0.
+ #
+ if { $logcompare } {
+ set msg "Logs and databases"
+ } else {
+ set msg "Databases ($dbname)"
+ }
+
+ if { $match } {
+ puts "\t\tRep_verify: $clientdir: $msg should match"
+ } else {
+ puts "\t\tRep_verify: $clientdir: $msg should not match"
+ }
+ # Check that master and client logs and dbs are identical.
+
+ # Logs first, if specified ...
+ #
+ # If compare_shared_portion is set, run db_printlog on the log
+ # subset that both client and master have. Either the client or
+ # the master may have more (earlier) log files, due to internal
+ # initialization, in-memory log wraparound, or other causes.
+ #
+ if { $logcompare } {
+ error_check_good logcmp \
+ [logcmp $masterenv $clientenv $compare_shared_portion] 0
+
+ if { $dbname == "NULL" } {
+ return
+ }
+ }
+
+ # ... now the databases.
+ #
+ # We're defensive here and throw an error if a database does
+ # not exist. If opening the first database succeeded but the
+ # second failed, we close the first before reporting the error.
+ #
+ if { [catch {eval {berkdb_open_noerr} -env $masterenv\
+ -rdonly $dbname} db1] } {
+ error "FAIL:\
+ Unable to open first db $dbname in rep_verify: $db1"
+ }
+ if { [catch {eval {berkdb_open_noerr} -env $clientenv\
+ -rdonly $dbname} db2] } {
+ error_check_good close_db1 [$db1 close] 0
+ error "FAIL:\
+ Unable to open second db $dbname in rep_verify: $db2"
+ }
+
+ # db_compare uses the database handles to do the comparison, and
+ # we pass in the $mumbledir/$dbname string as a label to make it
+ # easier to identify the offending database in case of failure.
+ # Therefore this will work for both in-memory and on-disk databases.
+ if { $match } {
+ error_check_good [concat comparedbs. $dbname] [db_compare \
+ $db1 $db2 $masterdir/$dbname $clientdir/$dbname] 0
+ } else {
+ error_check_bad comparedbs [db_compare \
+ $db1 $db2 $masterdir/$dbname $clientdir/$dbname] 0
+ }
+ error_check_good db1_close [$db1 close] 0
+ error_check_good db2_close [$db2 close] 0
+}
+
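+#
+# Replication event callback used by the tests. It records startup-done
+# and election events in global variables so tests can poll for them.
+#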
+proc rep_event { env eventlist } {
+ global startup_done
+ global elected_event
+ global elected_env
+
+ set event [lindex $eventlist 0]
+# puts "rep_event: Got event $event on env $env"
+ set eventlength [llength $eventlist]
+
+ if { $event == "startupdone" } {
+ error_check_good event_nodata $eventlength 1
+ set startup_done 1
+ }
+ if { $event == "elected" } {
+ error_check_good event_nodata $eventlength 1
+ set elected_event 1
+ set elected_env $env
+ }
+ if { $event == "newmaster" } {
+ error_check_good eiddata $eventlength 2
+ set event_newmasterid [lindex $eventlist 1]
+ }
+ return
+}
+
+# Return a list of TCP port numbers that are not currently in use on
+# the local system. Note that this doesn't actually reserve the
+# ports, so it's possible that by the time the caller tries to use
+# them, another process could have taken one of them. But for our
+# purposes that's unlikely enough that this is still useful: it's
+# still better than trying to find hard-coded port numbers that will
+# always be available.
+#
+proc available_ports { n } {
+ set ports {}
+ set socks {}
+
+ while {[incr n -1] >= 0} {
+ set sock [socket -server Unused -myaddr localhost 0]
+ set port [lindex [fconfigure $sock -sockname] 2]
+
+ lappend socks $sock
+ lappend ports $port
+ }
+
+ foreach sock $socks {
+ close $sock
+ }
+ return $ports
+}
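+#
+# For example (a sketch, not from an actual test): reserve three ports for
+# a small replication group and hand the first one to the master.
+#
+#	set ports [available_ports 3]
+#	set master_port [lindex $ports 0]
+#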
+
+# Wait (a limited amount of time) for an arbitrary condition to become true,
+# polling once per second. If time runs out we throw an error: a successful
+# return implies the condition is indeed true.
+#
+proc await_condition { cond { limit 20 } } {
+ for {set i 0} {$i < $limit} {incr i} {
+ if {[uplevel 1 [list expr $cond]]} {
+ return
+ }
+ tclsleep 1
+ }
+ error "FAIL: condition \{$cond\} not achieved in $limit seconds."
+}
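+#
+# For example (hypothetical marker file): wait up to 30 seconds for another
+# process to create a file before continuing.
+#
+#	await_condition {[file exists $testdir/marker]} 30
+#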
+
+proc await_startup_done { env { limit 20 } } {
+ await_condition {[stat_field $env rep_stat "Startup complete"]} $limit
+}
+
+# Wait (a limited amount of time) for an election to yield the expected
+# environment as winner.
+#
+proc await_expected_master { env { limit 20 } } {
+ await_condition {[stat_field $env rep_stat "Role"] == "master"} $limit
+}
+
+proc do_leaseop { env db method key envlist { domsgs 1 } } {
+ global alphabet
+
+ #
+	# Put a record to the database inside a txn. Process messages
+	# to envlist if directed to do so. Read data on the master,
+	# ignoring leases (should always succeed).
+ #
+ set num [berkdb random_int 1 100]
+ set data $alphabet.$num
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval \
+ {$db put} $txn {$key [chop_data $method $data]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ if { $domsgs } {
+ process_msgs $envlist
+ }
+
+ #
+ # Now make sure we can successfully read on the master
+ # if we ignore leases. That should always work. The
+ # caller will do any lease related calls and checks
+ # that are specific to the test.
+ #
+ set kd [$db get -nolease $key]
+ set curs [$db cursor]
+ set ckd [$curs get -nolease -set $key]
+ $curs close
+ error_check_good kd [llength $kd] 1
+ error_check_good ckd [llength $ckd] 1
+}
+
+#
+# Get the given key, expecting status depending on whether leases
+# are currently expected to be valid or not.
+#
+proc check_leaseget { db key getarg status } {
+ set stat [catch {eval {$db get} $getarg $key} kd]
+ if { $status != 0 } {
+ error_check_good get_result $stat 1
+ error_check_good kd_check \
+ [is_substr $kd $status] 1
+ } else {
+ error_check_good get_result_good $stat $status
+ error_check_good dbkey [lindex [lindex $kd 0] 0] $key
+ }
+ set curs [$db cursor]
+ set stat [catch {eval {$curs get} $getarg -set $key} kd]
+ if { $status != 0 } {
+ error_check_good get_result2 $stat 1
+ error_check_good kd_check \
+ [is_substr $kd $status] 1
+ } else {
+ error_check_good get_result2_good $stat $status
+ error_check_good dbckey [lindex [lindex $kd 0] 0] $key
+ }
+ $curs close
+}
+
+# Simple utility to check a client database for expected values. It does not
+# handle dup keys.
+#
+proc verify_client_data { env db items } {
+ set dbp [berkdb open -env $env $db]
+ foreach i $items {
+ foreach {key expected_value} $i {
+ set results [$dbp get $key]
+ error_check_good result_length [llength $results] 1
+ set value [lindex $results 0 1]
+ error_check_good expected_value $value $expected_value
+ }
+ }
+ $dbp close
+}
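+#
+# For example (hypothetical keys and values): check that two records
+# replicated to a client carry the expected data.
+#
+#	verify_client_data $clientenv test.db {{key1 data1} {key2 data2}}
+#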
+
+proc make_dbconfig { dir cnfs } {
+ global rep_verbose
+
+ set f [open "$dir/DB_CONFIG" "w"]
+ foreach line $cnfs {
+ puts $f $line
+ }
+ if {$rep_verbose} {
+ puts $f "set_verbose DB_VERB_REPLICATION"
+ }
+ close $f
+}
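+#
+# For example (the directives shown are illustrative): write a DB_CONFIG
+# that raises the lock table sizes for a site before its env is opened.
+#
+#	make_dbconfig $masterdir \
+#	    [list "set_lk_max_locks 10000" "set_lk_max_objects 10000"]
+#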
+
+proc open_site_prog { cmds } {
+
+ set site_prog [setup_site_prog]
+
+ set s [open "| $site_prog" "r+"]
+ fconfigure $s -buffering line
+ set synced yes
+ foreach cmd $cmds {
+ puts $s $cmd
+ if {[lindex $cmd 0] == "start"} {
+ gets $s
+ set synced yes
+ } else {
+ set synced no
+ }
+ }
+ if {! $synced} {
+ puts $s "echo done"
+ gets $s
+ }
+ return $s
+}
+
+proc setup_site_prog { } {
+ source ./include.tcl
+
+ # Generate the proper executable name for the system.
+ if { $is_windows_test } {
+ set repsite_executable db_repsite.exe
+ } else {
+ set repsite_executable db_repsite
+ }
+
+ # Check whether the executable exists.
+ if { [file exists $util_path/$repsite_executable] == 0 } {
+ error "Skipping: db_repsite executable\
+ not found. Is it built?"
+ } else {
+ set site_prog $util_path/$repsite_executable
+ }
+ return $site_prog
+}
+
+proc next_expected_lsn { env } {
+ return [stat_field $env rep_stat "Next LSN expected"]
+}
+
+proc lsn_file { lsn } {
+ if { [llength $lsn] != 2 } {
+ error "not a valid LSN: $lsn"
+ }
+
+ return [lindex $lsn 0]
+}
+
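+#
+# Run db_stat -RA against the given env home and assert that the named
+# replication flag is (value 1) or is not (value 0) reported.
+#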
+proc assert_rep_flag { dir flag value } {
+ global util_path
+
+ set stat [exec $util_path/db_stat -N -RA -h $dir]
+ set present [is_substr $stat $flag]
+ error_check_good expected.flag.$flag $present $value
+}
diff --git a/db-4.8.30/test/reputilsnoenv.tcl b/db-4.8.30/test/reputilsnoenv.tcl
new file mode 100644
index 0000000..9ebea0f
--- /dev/null
+++ b/db-4.8.30/test/reputilsnoenv.tcl
@@ -0,0 +1,509 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# The procs in this file are used for replication messaging
+# ONLY when the default mechanism of setting up a queue of
+# messages in an environment is not possible. This situation
+# is fairly rare, but it is necessary when a replication
+# test simultaneously runs different versions of Berkeley DB,
+# because different versions cannot share an env.
+#
+# Note, all procs should be named with the suffix _noenv
+# so it's explicit that we are using them.
+#
+# Close up a replication group - close all message dbs.
+proc replclose_noenv { queuedir } {
+ global queuedbs machids
+
+ set dbs [array names queuedbs]
+ foreach tofrom $dbs {
+ set handle $queuedbs($tofrom)
+ error_check_good db_close [$handle close] 0
+ unset queuedbs($tofrom)
+ }
+
+ set machids {}
+}
+
+# Create a replication group for testing.
+proc replsetup_noenv { queuedir } {
+ global queuedbs machids
+
+ file mkdir $queuedir
+
+ # If there are any leftover handles, get rid of them.
+ set dbs [array names queuedbs]
+ foreach tofrom $dbs {
+ unset queuedbs($tofrom)
+ }
+ set machids {}
+}
+
+# Send function for replication.
+proc replsend_noenv { control rec fromid toid flags lsn } {
+ global is_repchild
+ global queuedbs machids
+ global drop drop_msg
+ global perm_sent_list
+ global anywhere
+ global qtestdir testdir
+
+ if { ![info exists qtestdir] } {
+ set qtestdir $testdir
+ }
+ set queuedir $qtestdir/MSGQUEUEDIR
+ set permflags [lsearch $flags "perm"]
+ if { [llength $perm_sent_list] != 0 && $permflags != -1 } {
+# puts "replsend_noenv sent perm message, LSN $lsn"
+ lappend perm_sent_list $lsn
+ }
+
+ #
+	# If we are testing with dropped messages, then we drop every
+	# $drop_msg'th message. When we drop a message, just return 0
+	# and don't send anything.
+ #
+ if { $drop != 0 } {
+ incr drop
+ if { $drop == $drop_msg } {
+ set drop 1
+ return 0
+ }
+ }
+ # XXX
+ # -1 is DB_BROADCAST_EID
+ if { $toid == -1 } {
+ set machlist $machids
+ } else {
+ set m NULL
+ # If we can send this anywhere, send it to the first id
+		# we find that is neither toid nor fromid. If we don't
+ # find any other candidates, this falls back to the
+ # original toid.
+ if { $anywhere != 0 } {
+ set anyflags [lsearch $flags "any"]
+ if { $anyflags != -1 } {
+ foreach m $machids {
+ if { $m == $fromid || $m == $toid } {
+ continue
+ }
+ set machlist [list $m]
+ break
+ }
+ }
+ }
+ #
+ # If we didn't find a different site, fall back
+ # to the toid.
+ #
+ if { $m == "NULL" } {
+ set machlist [list $toid]
+ }
+ }
+ foreach m $machlist {
+ # Do not broadcast to self.
+ if { $m == $fromid } {
+ continue
+ }
+ # Find the handle for the right message file.
+ set pid [pid]
+ set db $queuedbs($m.$fromid.$pid)
+ set stat [catch {$db put -append [list $control $rec $fromid]} ret]
+ }
+ if { $is_repchild } {
+ replready_noenv $fromid from
+ }
+
+ return 0
+}
+
+#
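+# Return the number of message files currently ready "to" (the default)
+# or "from" the given machine id.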
+proc replmsglen_noenv { machid {tf "to"}} {
+ global queuedbs qtestdir testdir
+
+ if { ![info exists qtestdir] } {
+ set qtestdir $testdir
+ }
+ set queuedir $qtestdir/MSGQUEUEDIR
+ set orig [pwd]
+
+ cd $queuedir
+ if { $tf == "to" } {
+ set msgdbs [glob -nocomplain ready.$machid.*]
+ } else {
+ set msgdbs [glob -nocomplain ready.*.$machid.*]
+ }
+ cd $orig
+ return [llength $msgdbs]
+}
+
+# Discard all the pending messages for a particular site.
+proc replclear_noenv { machid {tf "to"}} {
+ global queuedbs qtestdir testdir
+
+ if { ![info exists qtestdir] } {
+ set qtestdir $testdir
+ }
+ set queuedir $qtestdir/MSGQUEUEDIR
+ set orig [pwd]
+
+ cd $queuedir
+ if { $tf == "to" } {
+ set msgdbs [glob -nocomplain ready.$machid.*]
+ } else {
+ set msgdbs [glob -nocomplain ready.*.$machid.*]
+ }
+ foreach m $msgdbs {
+ file delete -force $m
+ }
+ cd $orig
+ set dbs [array names queuedbs]
+ foreach tofrom $dbs {
+ # Process only messages _to_ the specified machid.
+ if { [string match $machid.* $tofrom] == 1 } {
+ set db $queuedbs($tofrom)
+ set dbc [$db cursor]
+ for { set dbt [$dbc get -first] } \
+ { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -next] } {
+ error_check_good \
+ replclear($machid)_del [$dbc del] 0
+ }
+ error_check_good replclear($db)_dbc_close [$dbc close] 0
+ }
+ }
+ cd $queuedir
+ if { $tf == "to" } {
+ set msgdbs [glob -nocomplain temp.$machid.*]
+ } else {
+ set msgdbs [glob -nocomplain temp.*.$machid.*]
+ }
+ foreach m $msgdbs {
+# file delete -force $m
+ }
+ cd $orig
+}
+
+# Makes messages available to replprocessqueue by closing and
+# renaming the message files. We ready the files for one machine
+# ID at a time -- just those "to" or "from" the machine we want to
+# process, depending on 'tf'.
+proc replready_noenv { machid tf } {
+ global queuedbs machids
+ global counter
+ global qtestdir testdir
+
+ if { ![info exists qtestdir] } {
+ set qtestdir $testdir
+ }
+ set queuedir $qtestdir/MSGQUEUEDIR
+
+ set pid [pid]
+ #
+ # Close the temporary message files for the specified machine.
+ # Only close it if there are messages available.
+ #
+ set dbs [array names queuedbs]
+ set closed {}
+ foreach tofrom $dbs {
+ set toidx [string first . $tofrom]
+ set toid [string replace $tofrom $toidx end]
+ set fidx [expr $toidx + 1]
+ set fromidx [string first . $tofrom $fidx]
+ #
+ # First chop off the end, then chop off the toid
+ # in the beginning.
+ #
+ set fromid [string replace $tofrom $fromidx end]
+ set fromid [string replace $fromid 0 $toidx]
+ if { ($tf == "to" && $machid == $toid) || \
+ ($tf == "from" && $machid == $fromid) } {
+ set nkeys [stat_field $queuedbs($tofrom) \
+ stat "Number of keys"]
+ if { $nkeys != 0 } {
+ lappend closed \
+ [list $toid $fromid temp.$tofrom]
+ error_check_good temp_close \
+ [$queuedbs($tofrom) close] 0
+ }
+ }
+ }
+
+ # Rename the message files.
+ set cwd [pwd]
+ foreach filename $closed {
+ set toid [lindex $filename 0]
+ set fromid [lindex $filename 1]
+ set fname [lindex $filename 2]
+ set tofrom [string replace $fname 0 4]
+ incr counter($machid)
+ cd $queuedir
+# puts "$queuedir: Msg ready $fname to ready.$tofrom.$counter($machid)"
+ file rename -force $fname ready.$tofrom.$counter($machid)
+ cd $cwd
+ replsetuptempfile_noenv $toid $fromid $queuedir
+
+ }
+}
+
+# Add a machine to a replication environment. This checks
+# that we have not already established that machine id, and
+# adds the machid to the list of ids.
+proc repladd_noenv { machid } {
+ global queuedbs machids counter qtestdir testdir
+
+ if { ![info exists qtestdir] } {
+ set qtestdir $testdir
+ }
+ set queuedir $qtestdir/MSGQUEUEDIR
+ if { [info exists machids] } {
+ if { [lsearch -exact $machids $machid] >= 0 } {
+ error "FAIL: repladd_noenv: machid $machid already exists."
+ }
+ }
+
+ set counter($machid) 0
+ lappend machids $machid
+
+ # Create all the databases that receive messages sent _to_
+ # the new machid.
+ replcreatetofiles_noenv $machid $queuedir
+
+ # Create all the databases that receive messages sent _from_
+ # the new machid.
+ replcreatefromfiles_noenv $machid $queuedir
+}
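+#
+# A rough sketch of how these procs fit together (the envids and transport
+# wiring shown are illustrative; details vary by test):
+#
+#	replsetup_noenv $qtestdir/MSGQUEUEDIR
+#	repladd_noenv 1
+#	repladd_noenv 2
+#	# ... each site's env is then opened with a -rep_transport that
+#	# calls replsend_noenv, and pending messages are drained via
+#	# replprocessqueue_noenv (normally through proc_msgs_once or
+#	# process_msgs with noenv_messaging set).
+#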
+
+# Creates all the databases that a machid needs for receiving messages
+# from other participants in a replication group. Used when first
+# establishing the temp files, but also used whenever replready_noenv moves
+# the temp files away, because we'll need new files for any future messages.
+proc replcreatetofiles_noenv { toid queuedir } {
+ global machids
+
+ foreach m $machids {
+ # We don't need a file for a machid to send itself messages.
+ if { $m == $toid } {
+ continue
+ }
+ replsetuptempfile_noenv $toid $m $queuedir
+ }
+}
+
+# Creates all the databases that a machid needs for sending messages
+# to other participants in a replication group. Used when first
+# establishing the temp files only. Replready moves files based on
+# recipient, so we recreate files based on the recipient, also.
+proc replcreatefromfiles_noenv { fromid queuedir } {
+ global machids
+
+ foreach m $machids {
+ # We don't need a file for a machid to send itself messages.
+ if { $m == $fromid } {
+ continue
+ }
+ replsetuptempfile_noenv $m $fromid $queuedir
+ }
+}
+
+proc replsetuptempfile_noenv { to from queuedir } {
+ global queuedbs
+
+ set pid [pid]
+# puts "Open new temp.$to.$from.$pid"
+ set queuedbs($to.$from.$pid) [berkdb_open -create -excl -recno\
+ -renumber $queuedir/temp.$to.$from.$pid]
+ error_check_good open_queuedbs [is_valid_db $queuedbs($to.$from.$pid)] TRUE
+}
+
+# Process a queue of messages, skipping every "skip_interval" entry.
+# We traverse the entire queue, but since we skip some messages, we
+# may end up leaving things in the queue, which should get picked up
+# on a later run.
+proc replprocessqueue_noenv { dbenv machid { skip_interval 0 } { hold_electp NONE } \
+ { dupmasterp NONE } { errp NONE } } {
+ global errorCode
+ global perm_response_list
+ global qtestdir testdir
+
+ # hold_electp is a call-by-reference variable which lets our caller
+ # know we need to hold an election.
+ if { [string compare $hold_electp NONE] != 0 } {
+ upvar $hold_electp hold_elect
+ }
+ set hold_elect 0
+
+ # dupmasterp is a call-by-reference variable which lets our caller
+ # know we have a duplicate master.
+ if { [string compare $dupmasterp NONE] != 0 } {
+ upvar $dupmasterp dupmaster
+ }
+ set dupmaster 0
+
+ # errp is a call-by-reference variable which lets our caller
+ # know we have gotten an error (that they expect).
+ if { [string compare $errp NONE] != 0 } {
+ upvar $errp errorp
+ }
+ set errorp 0
+
+ set nproced 0
+
+ set queuedir $qtestdir/MSGQUEUEDIR
+# puts "replprocessqueue_noenv: Make ready messages to eid $machid"
+
+ # Change directories temporarily so we get just the msg file name.
+ set cwd [pwd]
+ cd $queuedir
+ set msgdbs [glob -nocomplain ready.$machid.*]
+# puts "$queuedir.$machid: My messages: $msgdbs"
+ cd $cwd
+
+ foreach msgdb $msgdbs {
+ set db [berkdb_open $queuedir/$msgdb]
+ set dbc [$db cursor]
+
+ error_check_good process_dbc($machid) \
+ [is_valid_cursor $dbc $db] TRUE
+
+ for { set dbt [$dbc get -first] } \
+ { [llength $dbt] != 0 } \
+ { set dbt [$dbc get -next] } {
+ set data [lindex [lindex $dbt 0] 1]
+ set recno [lindex [lindex $dbt 0] 0]
+
+ # If skip_interval is nonzero, we want to process
+ # messages out of order. We do this in a simple but
+ # slimy way -- continue walking with the cursor
+ # without processing the message or deleting it from
+ # the queue, but do increment "nproced". The way
+ # this proc is normally used, the precise value of
+ # nproced doesn't matter--we just don't assume the
+ # queues are empty if it's nonzero. Thus, if we
+ # contrive to make sure it's nonzero, we'll always
+ # come back to records we've skipped on a later call
+ # to replprocessqueue. (If there really are no records,
+ # we'll never get here.)
+ #
+ # Skip every skip_interval'th record (and use a
+ # remainder other than zero so that we're guaranteed
+ # to really process at least one record on every call).
+ if { $skip_interval != 0 } {
+ if { $nproced % $skip_interval == 1 } {
+ incr nproced
+ set dbt [$dbc get -next]
+ continue
+ }
+ }
+
+ # We need to remove the current message from the
+ # queue, because we're about to end the transaction
+ # and someone else processing messages might come in
+ # and reprocess this message which would be bad.
+ #
+ error_check_good queue_remove [$dbc del] 0
+
+ # We have to play an ugly cursor game here: we
+ # currently hold a lock on the page of messages, but
+ # rep_process_message might need to lock the page with
+ # a different cursor in order to send a response. So
+ # save the next recno, close the cursor, and then
+ # reopen and reset the cursor. If someone else is
+ # processing this queue, our entry might have gone
+ # away, and we need to be able to handle that.
+ #
+# error_check_good dbc_process_close [$dbc close] 0
+
+ set ret [catch {$dbenv rep_process_message \
+ [lindex $data 2] [lindex $data 0] \
+ [lindex $data 1]} res]
+
+ # Save all ISPERM and NOTPERM responses so we can
+ # compare their LSNs to the LSN in the log. The
+ # variable perm_response_list holds the entire
+ # response so we can extract responses and LSNs as
+ # needed.
+ #
+ if { [llength $perm_response_list] != 0 && \
+ ([is_substr $res ISPERM] || [is_substr $res NOTPERM]) } {
+ lappend perm_response_list $res
+ }
+
+ if { $ret != 0 } {
+ if { [string compare $errp NONE] != 0 } {
+ set errorp "$dbenv $machid $res"
+ } else {
+ error "FAIL:[timestamp]\
+ rep_process_message returned $res"
+ }
+ }
+
+ incr nproced
+ if { $ret == 0 } {
+ set rettype [lindex $res 0]
+ set retval [lindex $res 1]
+ #
+ # Do nothing for 0 and NEWSITE
+ #
+ if { [is_substr $rettype HOLDELECTION] } {
+ set hold_elect 1
+ }
+ if { [is_substr $rettype DUPMASTER] } {
+ set dupmaster "1 $dbenv $machid"
+ }
+ if { [is_substr $rettype NOTPERM] || \
+ [is_substr $rettype ISPERM] } {
+ set lsnfile [lindex $retval 0]
+ set lsnoff [lindex $retval 1]
+ }
+ }
+
+ if { $errorp != 0 } {
+ # Break on an error, caller wants to handle it.
+ break
+ }
+ if { $hold_elect == 1 } {
+ # Break on a HOLDELECTION, for the same reason.
+ break
+ }
+ if { $dupmaster == 1 } {
+ # Break on a DUPMASTER, for the same reason.
+ break
+ }
+
+ }
+ error_check_good dbc_close [$dbc close] 0
+
+ #
+ # Check the number of keys remaining because we only
+		# want to rename to done the message files that are
+ # fully processed. Some message types might break
+ # out of the loop early and we want to process
+ # the remaining messages the next time through.
+ #
+ set nkeys [stat_field $db stat "Number of keys"]
+ error_check_good db_close [$db close] 0
+
+ if { $nkeys == 0 } {
+ set dbname [string replace $msgdb 0 5 done.]
+ #
+ # We have to do a special dance to get rid of the
+ # empty messaging files because of the way Windows
+ # handles open files marked for deletion.
+ # On Windows, a file is marked for deletion but
+ # does not actually get deleted until the last handle
+ # is closed. This causes a problem when a test tries
+ # to create a new file with a previously-used name,
+ # and Windows believes the old file still exists.
+ # Therefore, we rename the files before deleting them,
+ # to guarantee they are out of the way.
+ #
+ file rename -force $queuedir/$msgdb $queuedir/$dbname
+ file delete -force $queuedir/$dbname
+ }
+ }
+ # Return the number of messages processed.
+ return $nproced
+}
+
diff --git a/db-4.8.30/test/rpc001.tcl b/db-4.8.30/test/rpc001.tcl
new file mode 100644
index 0000000..5f0e07a
--- /dev/null
+++ b/db-4.8.30/test/rpc001.tcl
@@ -0,0 +1,476 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rpc001
+# TEST Test RPC server timeouts for cursor, txn and env handles.
+proc rpc001 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global is_je_test
+ global rpc_svc
+ source ./include.tcl
+
+ #
+ # First test timeouts on server.
+ #
+ set ttime 5
+ set itime 10
+ puts "Rpc001: Server timeouts: resource $ttime sec, idle $itime sec"
+ puts "Rpc001: Using $rpc_svc"
+ set dpid [rpc_server_start 0 30 -t $ttime -I $itime]
+ puts "\tRpc001.a: Started server, pid $dpid"
+
+ #
+ # Wrap the whole test in a catch statement so we can still kill
+ # the rpc server even if the test fails.
+ #
+ set status [catch {
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+
+ puts "\tRpc001.b: Creating environment"
+
+ set testfile "rpc001.db"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc001.c: Opening a database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set db [eval {berkdb_open -auto_commit -create -btree \
+ -mode 0644} -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set curs_list {}
+ set txn_list {}
+ puts "\tRpc001.d: Basic timeout test"
+ puts "\tRpc001.d1: Starting a transaction"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ lappend txn_list $txn
+
+ puts "\tRpc001.d2: Open a cursor in that transaction"
+ set dbc [$db cursor -txn $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ puts "\tRpc001.d3: Duplicate that cursor"
+ set dbc [$dbc dup]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ if { !$is_je_test } {
+ puts "\tRpc001.d4: Starting a nested transaction"
+ set txn [$env txn -parent $txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ }
+
+ puts "\tRpc001.d5: Create a cursor, no transaction"
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ puts "\tRpc001.d6: Timeout cursor and transactions"
+ set sleeptime [expr $ttime + 2]
+ tclsleep $sleeptime
+
+ #
+		# Perform a generic db operation to cause the timeout routine
+ # to trigger.
+ #
+ set stat [catch {$db stat} ret]
+ error_check_good dbstat $stat 0
+
+ #
+ # Check that every handle we opened above is timed out
+ #
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 1
+ error_check_good dbc_timeout:$c \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 1
+ error_check_good txn_timeout:$t \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+
+ set txn_list {}
+
+ if { !$is_je_test } {
+ set ntxns 8
+ puts "\tRpc001.e: Nested ($ntxns x $ntxns) txn activity test"
+ puts "\tRpc001.e1: Starting parent transaction"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ set last_txn $txn
+ set parent_txn $txn
+
+ #
+ # First set a breadth of 'ntxns'
+ # We need 2 from this set for testing later on. Just
+ # set them up separately first.
+ #
+ puts "\tRpc001.e2: Creating $ntxns child transactions"
+ set child0 [$env txn -parent $parent_txn]
+ error_check_good txn_begin \
+ [is_valid_txn $child0 $env] TRUE
+ set child1 [$env txn -parent $parent_txn]
+ error_check_good txn_begin \
+ [is_valid_txn $child1 $env] TRUE
+
+ for {set i 2} {$i < $ntxns} {incr i} {
+ set txn [$env txn -parent $parent_txn]
+ error_check_good txn_begin \
+ [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ }
+
+ #
+			# Now make a chain of transactions nested 'ntxns' deep.
+ # Add one more for testing later on separately.
+ #
+ puts "\tRpc001.e3: Creating $ntxns nested child transactions"
+ for {set i 0} {$i < $ntxns} {incr i} {
+ set txn [$env txn -parent $last_txn]
+ error_check_good txn_begin \
+ [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ set last_txn $txn
+ }
+ set last_parent $last_txn
+ set last_txn [$env txn -parent $last_parent]
+ error_check_good txn_begin \
+ [is_valid_txn $last_txn $env] TRUE
+
+ puts "\tRpc001.e4: Open a cursor in deepest transaction"
+ set dbc [$db cursor -txn $last_txn]
+ error_check_good db_cursor \
+ [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tRpc001.e5: Duplicate that cursor"
+ set dbcdup [$dbc dup]
+ error_check_good db_cursor \
+ [is_valid_cursor $dbcdup $db] TRUE
+ lappend curs_list $dbcdup
+
+ puts "\tRpc001.f: Timeout then activate duplicate cursor"
+ tclsleep $sleeptime
+ set stat [catch {$dbcdup close} ret]
+ error_check_good dup_close:$dbcdup $stat 0
+ error_check_good dup_close:$dbcdup $ret 0
+
+ #
+ # Make sure that our parent txn is not timed out. We
+			# will try to begin another child txn using the parent.
+ # We expect that to succeed. Immediately commit that
+ # txn.
+ #
+ set stat [catch {$env txn -parent $parent_txn} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.g: Timeout, then activate cursor"
+ tclsleep $sleeptime
+ set stat [catch {$dbc close} ret]
+ error_check_good dbc_close:$dbc $stat 0
+ error_check_good dbc_close:$dbc $ret 0
+
+ #
+ # Make sure that our parent txn is not timed out. We
+			# will try to begin another child txn using the parent.
+ # We expect that to succeed. Immediately commit that
+ # txn.
+ #
+ set stat [catch {$env txn -parent $parent_txn} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.h: Timeout, then activate child txn"
+ tclsleep $sleeptime
+ set stat [catch {$child0 commit} ret]
+ error_check_good child_commit $stat 0
+ error_check_good child_commit:$child0 $ret 0
+
+ #
+ # Make sure that our nested txn is not timed out. We
+			# will try to begin another child txn using the parent.
+ # We expect that to succeed. Immediately commit that
+ # txn.
+ #
+ set stat \
+ [catch {$env txn -parent $last_parent} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.i: Timeout, then activate nested txn"
+ tclsleep $sleeptime
+ set stat [catch {$last_txn commit} ret]
+ error_check_good lasttxn_commit $stat 0
+ error_check_good lasttxn_commit:$child0 $ret 0
+
+ #
+ # Make sure that our child txn is not timed out. We
+ # should be able to commit it.
+ #
+ set stat [catch {$child1 commit} ret]
+ error_check_good child_commit:$child1 $stat 0
+ error_check_good child_commit:$child1 $ret 0
+
+ #
+ # Clean up. They were inserted in LIFO order, so we
+ # should just be able to commit them all.
+ #
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 0
+ error_check_good txn_commit:$t $ret 0
+ }
+ }
+
+ set stat [catch {$db close} ret]
+ error_check_good db_close $stat 0
+
+ rpc_timeoutjoin $env "Rpc001.j" $sleeptime 0
+ rpc_timeoutjoin $env "Rpc001.k" $sleeptime 1
+
+ puts "\tRpc001.l: Timeout idle env handle"
+ set sleeptime [expr $itime + 2]
+ tclsleep $sleeptime
+
+ #
+ # We need to do another operation to time out the environment
+ # handle. Open another environment, with an invalid home
+ # directory.
+ #
+ set stat [catch {eval {berkdb_env_noerr -home "$home.fail" \
+ -server $rpc_server}} ret]
+ error_check_good env_open $stat 1
+
+ set stat [catch {$env close} ret]
+ error_check_good env_close $stat 1
+ error_check_good env_timeout \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ } res]
+ if { $status != 0 } {
+ puts $res
+ }
+ tclkill $dpid
+}
+
+proc rpc_timeoutjoin {env msg sleeptime use_txn} {
+ #
+ # Check join cursors now.
+ #
+ puts -nonewline "\t$msg: Test join cursors and timeouts"
+ if { $use_txn } {
+ puts " (using txns)"
+ set txnflag "-auto_commit"
+ } else {
+ puts " (without txns)"
+ set txnflag ""
+ }
+ #
+ # Set up a simple set of join databases
+ #
+ puts "\t${msg}0: Set up join databases"
+ set fruit {
+ {blue blueberry}
+ {red apple} {red cherry} {red raspberry}
+ {yellow lemon} {yellow pear}
+ }
+ set price {
+ {expen blueberry} {expen cherry} {expen raspberry}
+ {inexp apple} {inexp lemon} {inexp pear}
+ }
+ set dessert {
+ {blueberry cobbler} {cherry cobbler} {pear cobbler}
+ {apple pie} {raspberry pie} {lemon pie}
+ }
+ set fdb [eval {berkdb_open -create -btree -mode 0644} \
+ $txnflag -env $env -dup -dupsort fruit.db]
+ error_check_good dbopen [is_valid_db $fdb] TRUE
+ set pdb [eval {berkdb_open -create -btree -mode 0644} \
+ $txnflag -env $env -dup -dupsort price.db]
+ error_check_good dbopen [is_valid_db $pdb] TRUE
+ set ddb [eval {berkdb_open -create -btree -mode 0644} \
+ $txnflag -env $env -dup -dupsort dessert.db]
+ error_check_good dbopen [is_valid_db $ddb] TRUE
+ foreach kd $fruit {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [eval {$fdb put} {$k $d}]
+ error_check_good fruit_put $ret 0
+ }
+ error_check_good sync [$fdb sync] 0
+ foreach kd $price {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [eval {$pdb put} {$k $d}]
+ error_check_good price_put $ret 0
+ }
+ error_check_good sync [$pdb sync] 0
+ foreach kd $dessert {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [eval {$ddb put} {$k $d}]
+ error_check_good dessert_put $ret 0
+ }
+ error_check_good sync [$ddb sync] 0
+
+ rpc_join $env $msg $sleeptime $fdb $pdb $ddb $use_txn 0
+ rpc_join $env $msg $sleeptime $fdb $pdb $ddb $use_txn 1
+
+ error_check_good ddb:close [$ddb close] 0
+ error_check_good pdb:close [$pdb close] 0
+ error_check_good fdb:close [$fdb close] 0
+ error_check_good ddb:remove [$env dbremove dessert.db] 0
+ error_check_good pdb:remove [$env dbremove price.db] 0
+ error_check_good fdb:remove [$env dbremove fruit.db] 0
+}
+
+proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
+ global errorInfo
+ global is_je_test
+
+ #
+ # Start a parent and child transaction. We'll do our join in
+ # the child transaction just to make sure everything gets timed
+ # out correctly.
+ #
+ set curs_list {}
+ set txn_list {}
+ set msgnum [expr $op * 2 + 1]
+ if { $use_txn } {
+ puts "\t$msg$msgnum: Set up txns and join cursor"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ if { !$is_je_test } {
+ set child0 [$env txn -parent $txn]
+ error_check_good txn_begin \
+ [is_valid_txn $child0 $env] TRUE
+ set txn_list [linsert $txn_list 0 $child0]
+ set child1 [$env txn -parent $txn]
+ error_check_good txn_begin \
+ [is_valid_txn $child1 $env] TRUE
+ set txn_list [linsert $txn_list 0 $child1]
+ } else {
+ set child0 $txn
+ set child1 $txn
+ }
+ set txncmd "-txn $child0"
+ } else {
+ puts "\t$msg$msgnum: Set up join cursor"
+ set txncmd ""
+ }
+
+ #
+	# Start cursors (using txn child0 in the fruit and price dbs, if
+	# needed). Just pick something simple to join on.
+ # Then call join on the dessert db.
+ #
+ set fkey yellow
+ set pkey inexp
+ set fdbc [eval $fdb cursor $txncmd]
+ error_check_good fdb_cursor [is_valid_cursor $fdbc $fdb] TRUE
+ set ret [$fdbc get -set $fkey]
+ error_check_bad fget:set [llength $ret] 0
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good fget:set:key $k $fkey
+ set curs_list [linsert $curs_list 0 $fdbc]
+
+ set pdbc [eval $pdb cursor $txncmd]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set ret [$pdbc get -set $pkey]
+ error_check_bad pget:set [llength $ret] 0
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good pget:set:key $k $pkey
+ set curs_list [linsert $curs_list 0 $pdbc]
+
+ set jdbc [$ddb join $fdbc $pdbc]
+ error_check_good join_cursor [is_valid_cursor $jdbc $ddb] TRUE
+ set ret [$jdbc get]
+ error_check_bad jget [llength $ret] 0
+
+ set msgnum [expr $op * 2 + 2]
+ if { $op == 1 } {
+ puts -nonewline "\t$msg$msgnum: Timeout all cursors"
+ if { $use_txn } {
+ puts " and txns"
+ } else {
+ puts ""
+ }
+ } else {
+ puts "\t$msg$msgnum: Timeout, then activate join cursor"
+ }
+
+ tclsleep $sleep
+
+ if { $op == 1 } {
+ #
+		# Perform a generic db operation to cause the timeout routine
+ # to trigger.
+ #
+ set stat [catch {$fdb stat} ret]
+ error_check_good fdbstat $stat 0
+
+ #
+ # Check that join cursor is timed out.
+ #
+ set stat [catch {$jdbc close} ret]
+ error_check_good dbc_close:$jdbc $stat 1
+ error_check_good dbc_timeout:$jdbc \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+
+ #
+		# Now the server may or may not time out constituent
+		# cursors when it times out the join cursor. So, just
+		# sleep again and then they should time out.
+ #
+ tclsleep $sleep
+ set stat [catch {$fdb stat} ret]
+ error_check_good fdbstat $stat 0
+
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 1
+ error_check_good dbc_timeout:$c \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 1
+ error_check_good txn_timeout:$t \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+ } else {
+ set stat [catch {$jdbc get} ret]
+ error_check_good jget.stat $stat 0
+ error_check_bad jget [llength $ret] 0
+ set curs_list [linsert $curs_list 0 $jdbc]
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 0
+ error_check_good dbc_close:$c $ret 0
+ }
+
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 0
+ error_check_good txn_commit:$t $ret 0
+ }
+ }
+}
diff --git a/db-4.8.30/test/rpc002.tcl b/db-4.8.30/test/rpc002.tcl
new file mode 100644
index 0000000..95164b8
--- /dev/null
+++ b/db-4.8.30/test/rpc002.tcl
@@ -0,0 +1,161 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rpc002
+# TEST Test invalid RPC functions and make sure we error them correctly
+# TEST Test server home directory error cases
+proc rpc002 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global rpc_svc
+ source ./include.tcl
+
+ set testfile "rpc002.db"
+ set home [file tail $rpc_testdir]
+ #
+ # First start the server.
+ #
+ puts "Rpc002: Error and Unsupported interface test"
+ puts "Rpc002: Using $rpc_svc"
+ set dpid [rpc_server_start]
+ puts "\tRpc002.a: Started server, pid $dpid"
+
+ #
+ # Wrap the whole test in a catch statement so we can still
+ # kill the rpc server even if the test fails.
+ #
+ set status [catch {
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+
+ puts "\tRpc002.b: Unsupported env options"
+ #
+ # Test each "pre-open" option for env's. These need to be
+ # tested on the 'berkdb_env' line.
+ #
+ set rlist {
+ { "-data_dir $rpc_testdir" "Rpc002.b0"}
+ { "-log_buffer 512" "Rpc002.b1"}
+ { "-log_dir $rpc_testdir" "Rpc002.b2"}
+ { "-log_max 100" "Rpc002.b3"}
+ { "-lock_conflict {3 {0 0 0 0 0 1 0 1 1}}" "Rpc002.b4"}
+ { "-lock_detect default" "Rpc002.b5"}
+ { "-lock_max_locks 100" "Rpc002.b6"}
+ { "-lock_max_objects 100" "Rpc002.b6"}
+ { "-lock_max_lockers 100" "Rpc002.b6"}
+ { "-mpool_mmap_size 100" "Rpc002.b7"}
+ { "-shm_key 100" "Rpc002.b9"}
+ { "-tmp_dir $rpc_testdir" "Rpc002.b10"}
+ { "-txn_max 100" "Rpc002.b11"}
+ { "-txn_timestamp 100" "Rpc002.b12"}
+ { "-verbose {recovery on}" "Rpc002.b13"}
+ }
+
+ set e "berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn"
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+
+ set stat [catch {eval $e $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err [is_substr $errorInfo \
+ "Interface not supported by Berkeley DB RPC"] 1
+ }
+
+ #
+ # Open an env with all the subsystems (-txn implies all
+ # the rest)
+ #
+ puts "\tRpc002.c: Unsupported env related interfaces"
+ set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn}]
+ error_check_good envopen [is_valid_env $env] TRUE
+ set dbcmd "berkdb_open_noerr -create -btree -mode 0644 \
+ -env $env $testfile"
+ set db [eval $dbcmd]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ #
+ # Test each "post-open" option relating to envs, txns, locks,
+ # logs and mpools.
+ #
+ set rlist {
+ { " lock_detect default" "Rpc002.c0"}
+ { " lock_get read 1 $env" "Rpc002.c1"}
+ { " lock_id" "Rpc002.c2"}
+ { " lock_stat" "Rpc002.c3"}
+ { " lock_vec 1 {get $env read}" "Rpc002.c4"}
+ { " log_archive" "Rpc002.c5"}
+ { " log_file {0 0}" "Rpc002.c6"}
+ { " log_flush" "Rpc002.c7"}
+ { " log_cursor" "Rpc002.c8"}
+ { " log_stat" "Rpc002.c9"}
+ { " mpool -create -pagesize 512" "Rpc002.c10"}
+ { " mpool_stat" "Rpc002.c11"}
+ { " mpool_sync {0 0}" "Rpc002.c12"}
+ { " mpool_trickle 50" "Rpc002.c13"}
+ { " txn_checkpoint -min 1" "Rpc002.c14"}
+ { " txn_stat" "Rpc002.c15"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+
+ set stat [catch {eval $env $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err [is_substr $errorInfo \
+ "Interface not supported by Berkeley DB RPC"] 1
+ }
+ error_check_good dbclose [$db close] 0
+
+ #
+ # The database operations that aren't supported are few
+ # because mostly they are the ones Tcl doesn't support
+	# either, so we have no way to get at them. Test what we can.
+ #
+ puts "\tRpc002.d: Unsupported database related interfaces"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ #
+ puts "\tRpc002.d0: -cachesize"
+ set dbcmd "berkdb_open_noerr -create -btree -mode 0644 \
+ -env $env -cachesize {0 65536 0} $testfile"
+ set stat [catch {eval $dbcmd} ret]
+ error_check_good dbopen_cache $stat 1
+ error_check_good dbopen_cache_err [is_substr $errorInfo \
+ "Interface not supported by Berkeley DB RPC"] 1
+
+ puts "\tRpc002.d1: Try to upgrade a database"
+ set stat [catch {eval {berkdb upgrade -env} $env $testfile} ret]
+ error_check_good dbupgrade $stat 1
+ error_check_good dbupgrade_err [is_substr $errorInfo \
+ "Interface not supported by Berkeley DB RPC"] 1
+ error_check_good envclose [$env close] 0
+
+ puts "\tRpc002.e: Open env with unsupported home dir"
+ set stat [catch {eval {berkdb_env_noerr -create -mode 0644 \
+ -home XXX -server $rpc_server -client_timeout 10000 \
+ -txn}} ret]
+ error_check_good env2open $stat 1
+ error_check_good envfail [is_substr $ret "Home unrecognized"] 1
+
+ puts "\tRpc002.f: Open env with a NULL home dir"
+ set stat [catch {eval {berkdb_env_noerr -create -mode 0644 \
+ -server $rpc_server -client_timeout 10000 -txn}} ret]
+ error_check_good env2open $stat 1
+ error_check_good envfail [is_substr $ret "Home unrecognized"] 1
+ } res]
+ if { $status != 0 } {
+ puts $res
+ }
+ tclkill $dpid
+}
diff --git a/db-4.8.30/test/rpc003.tcl b/db-4.8.30/test/rpc003.tcl
new file mode 100644
index 0000000..ae1e6bd
--- /dev/null
+++ b/db-4.8.30/test/rpc003.tcl
@@ -0,0 +1,184 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rpc003
+# TEST Test RPC and secondary indices.
+proc rpc003 { } {
+ source ./include.tcl
+ global dict nsecondaries
+ global rpc_svc
+
+ #
+	# First set up the files. Secondary indices only work read-only
+	# over RPC. So we need to create the databases first without
+	# RPC. Then run the checks over RPC.
+ #
+ puts "Rpc003: Secondary indices over RPC"
+ puts "Rpc003: Using $rpc_svc"
+ if { [string compare $rpc_server "localhost"] != 0 } {
+ puts "Cannot run to non-local RPC server. Skipping."
+ return
+ }
+ cleanup $testdir NULL
+ puts "\tRpc003.a: Creating local secondary index databases"
+
+ # Primary method/args.
+ set pmethod btree
+ set pomethod [convert_method $pmethod]
+ set pargs ""
+ set methods {dbtree dbtree}
+ set argses [convert_argses $methods ""]
+ set omethods [convert_methods $methods]
+
+ set nentries 500
+
+ puts "\tRpc003.b: ($pmethod/$methods) $nentries equal key/data pairs"
+ set pname "primary003.db"
+ set snamebase "secondary003"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # We have set up our databases, so now start the server and
+ # read them over RPC.
+ #
+ set dpid [rpc_server_start]
+ puts "\tRpc003.c: Started server, pid $dpid"
+
+ #
+ # Wrap the remainder of the test in a catch statement so we
+ # can still kill the rpc server even if the test fails.
+ #
+ set status [catch {
+ tclsleep 2
+ set home [file tail $rpc_testdir]
+ set env [eval {berkdb_env_noerr -create -mode 0644 \
+ -home $home -server $rpc_server}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ #
+ # Attempt to send in a NULL callback to associate. It will
+ # fail if the primary and secondary are not both read-only.
+ #
+ set msg "\tRpc003.d"
+ puts "$msg: Using r/w primary and r/w secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
+ set sopen "berkdb_open_noerr -create -env $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ set msg "\tRpc003.e"
+ puts "$msg: Using r/w primary and read-only secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
+ set sopen "berkdb_open_noerr -env $env -rdonly \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ set msg "\tRpc003.f"
+ puts "$msg: Using read-only primary and r/w secondary"
+ set popen "berkdb_open_noerr -env $env \
+ $pomethod -rdonly $pargs $pname"
+ set sopen "berkdb_open_noerr -create -env $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ # Open and associate the secondaries
+ puts "\tRpc003.g: Checking secondaries, both read-only"
+ set pdb [eval {berkdb_open_noerr -env} $env \
+ -rdonly $pomethod $pargs $pname]
+ error_check_good primary_open2 [is_valid_db $pdb] TRUE
+
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -env} $env -rdonly \
+ [lindex $omethods $i] [lindex $argses $i] \
+ $snamebase.$i.db]
+ error_check_good second_open2($i) \
+ [is_valid_db $sdb] TRUE
+ error_check_good db_associate2($i) \
+ [eval {$pdb associate} "" $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Rpc003.h"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+ } res]
+ if { $status != 0 } {
+ puts $res
+ }
+ tclkill $dpid
+}
+
+proc rpc003_assoc_err { popen sopen msg } {
+ global rpc_svc
+
+ set pdb [eval $popen]
+ error_check_good assoc_err_popen [is_valid_db $pdb] TRUE
+
+ puts "$msg.0: NULL callback"
+ set sdb [eval $sopen]
+ error_check_good assoc_err_sopen [is_valid_db $sdb] TRUE
+ set stat [catch {eval {$pdb associate} "" $sdb} ret]
+ error_check_good db_associate:rdonly $stat 1
+ error_check_good db_associate:inval [is_substr $ret invalid] 1
+
+ # The Java and JE RPC servers support callbacks.
+ if { $rpc_svc == "berkeley_db_svc" || \
+ $rpc_svc == "berkeley_db_cxxsvc" } {
+ puts "$msg.1: non-NULL callback"
+ set stat [catch {eval $pdb associate [callback_n 0] $sdb} ret]
+ error_check_good db_associate:callback $stat 1
+ error_check_good db_associate:inval [is_substr $ret invalid] 1
+ }
+
+ error_check_good assoc_sclose [$sdb close] 0
+ error_check_good assoc_pclose [$pdb close] 0
+}
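
All of the Rpc tests added here share one skeleton: start the server, run the body inside catch so a failure cannot leak the server process, print the error if there was one, and always tclkill the server. Below is a minimal sketch of that skeleton, assuming the suite's rpc_server_start, tclsleep and tclkill helpers and the rpc_server/rpc_testdir variables from include.tcl; the proc name rpc_skeleton is made up for illustration.

	proc rpc_skeleton { } {
		source ./include.tcl

		# Start the RPC server; the helper returns its pid.
		set dpid [rpc_server_start]

		# Run the body under catch so an error cannot skip the cleanup.
		set status [catch {
			tclsleep 2
			set home [file tail $rpc_testdir]
			set env [berkdb_env -create -mode 0644 \
			    -home $home -server $rpc_server]

			# ... per-test body goes here ...

			$env close
		} res]

		# Report any failure, but always kill the server.
		if { $status != 0 } {
			puts $res
		}
		tclkill $dpid
	}
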
diff --git a/db-4.8.30/test/rpc004.tcl b/db-4.8.30/test/rpc004.tcl
new file mode 100644
index 0000000..2b92716
--- /dev/null
+++ b/db-4.8.30/test/rpc004.tcl
@@ -0,0 +1,87 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rpc004
+# TEST Test RPC server and security
+proc rpc004 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global passwd
+ global has_crypto
+ global rpc_svc
+ source ./include.tcl
+
+ puts "Rpc004: RPC server + security"
+ puts "Rpc004: Using $rpc_svc"
+ # Skip test if release does not support encryption.
+ if { $has_crypto == 0 } {
+ puts "Skipping test rpc004 for non-crypto release."
+ return
+ }
+
+ cleanup $testdir NULL
+ set dpid [rpc_server_start 1]
+ puts "\tRpc004.a: Started server, pid $dpid"
+
+ #
+ # Wrap the test in a catch statement so we can still kill
+ # the rpc server even if the test fails.
+ #
+ set status [catch {
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+
+ puts "\tRpc004.b: Creating environment"
+
+ set testfile "rpc004.db"
+ set testfile1 "rpc004a.db"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -encryptaes $passwd -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc004.c: Opening a non-encrypted database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set db [eval {berkdb_open -auto_commit -create -btree \
+ -mode 0644} -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRpc004.d: Opening an encrypted database"
+ set db1 [eval {berkdb_open -auto_commit -create -btree \
+ -mode 0644} -env $env -encrypt $testfile1]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ puts "\tRpc004.e: Put/get on both databases"
+ set key "key"
+ set data "data"
+
+ set ret [$db put -txn $txn $key $data]
+ error_check_good db_put $ret 0
+ set ret [$db get -txn $txn $key]
+ error_check_good db_get $ret [list [list $key $data]]
+ set ret [$db1 put -txn $txn $key $data]
+ error_check_good db1_put $ret 0
+ set ret [$db1 get -txn $txn $key]
+ error_check_good db1_get $ret [list [list $key $data]]
+
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good db1_close [$db1 close] 0
+ error_check_good env_close [$env close] 0
+
+ # Cleanup our environment because it's encrypted
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ } res]
+ if { $status != 0 } {
+ puts $res
+ }
+ tclkill $dpid
+}
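
The point rpc004 exercises is that database-level encryption is opt-in even when the environment is opened with -encryptaes: only handles opened with -encrypt use the environment's password. A minimal local (non-RPC) sketch of that distinction; the directory and file names are hypothetical.

	file mkdir ENCDIR

	# The environment carries the AES password.
	set env [berkdb_env -create -home ENCDIR -encryptaes "test_passwd" -txn]

	# Plain database: stored in cleartext even inside this environment.
	set plain [berkdb_open -auto_commit -create -btree -env $env plain.db]

	# Encrypted database: -encrypt makes it use the environment's password.
	set cipher [berkdb_open -auto_commit -create -btree -env $env -encrypt cipher.db]

	$plain put key data
	$cipher put key data

	$plain close
	$cipher close
	$env close
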
diff --git a/db-4.8.30/test/rpc005.tcl b/db-4.8.30/test/rpc005.tcl
new file mode 100644
index 0000000..be30ec6
--- /dev/null
+++ b/db-4.8.30/test/rpc005.tcl
@@ -0,0 +1,158 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rpc005
+# TEST Test RPC server handle ID sharing
+proc rpc005 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global rpc_svc
+ global is_hp_test
+ source ./include.tcl
+
+ puts "Rpc005: RPC server handle sharing"
+ puts "Rpc005: Using $rpc_svc"
+ set dpid [rpc_server_start]
+ puts "\tRpc005.a: Started server, pid $dpid"
+
+ #
+ # Wrap the test in a catch statement so we can still kill
+ # the rpc server even if the test fails.
+ #
+ set status [catch {
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc005.b: Creating environment"
+
+ set testfile "rpc005.db"
+ set testfile1 "rpc005a.db"
+ set subdb1 "subdb1"
+ set subdb2 "subdb2"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ # You can't open two handles on the same env in
+ # HP-UX, so skip this piece.
+ if { $is_hp_test == 1 } {
+ puts "\tRpc005.c: Skipping for HP-UX."
+ } else {
+ puts "\tRpc005.c: Compare identical and different \
+ configured envs"
+ set env_ident [eval {berkdb_env -home $home \
+ -server $rpc_server -txn}]
+ error_check_good \
+ lock_env:open [is_valid_env $env_ident] TRUE
+
+ set env_diff [eval {berkdb_env -home $home \
+ -server $rpc_server -txn nosync}]
+ error_check_good \
+ lock_env:open [is_valid_env $env_diff] TRUE
+
+ error_check_good \
+ ident:id [$env rpcid] [$env_ident rpcid]
+ error_check_bad \
+ diff:id [$env rpcid] [$env_diff rpcid]
+
+ error_check_good envclose [$env_diff close] 0
+ error_check_good envclose [$env_ident close] 0
+ }
+
+ puts "\tRpc005.d: Opening a database"
+ set db [eval {berkdb_open -auto_commit -create -btree \
+ -mode 0644} -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRpc005.e: Compare identical and different \
+ configured dbs"
+ set db_ident [eval {berkdb_open -btree} -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db_ident] TRUE
+
+ set db_diff [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile]
+ error_check_good dbopen [is_valid_db $db_diff] TRUE
+
+ set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile]
+ error_check_good dbopen [is_valid_db $db_diff2] TRUE
+
+ error_check_good ident:id [$db rpcid] [$db_ident rpcid]
+ error_check_bad diff:id [$db rpcid] [$db_diff rpcid]
+ error_check_good ident2:id [$db_diff rpcid] [$db_diff2 rpcid]
+
+ error_check_good db_close [$db_ident close] 0
+ error_check_good db_close [$db_diff close] 0
+ error_check_good db_close [$db_diff2 close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tRpc005.f: Compare with a database and subdatabases"
+ set db [eval {berkdb_open -auto_commit -create -btree \
+ -mode 0644} -env $env $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set dbid [$db rpcid]
+
+ set db2 [eval {berkdb_open -auto_commit -create -btree \
+ -mode 0644} -env $env $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db2] TRUE
+ set db2id [$db2 rpcid]
+ error_check_bad 2subdb:id $dbid $db2id
+
+ set db_ident [eval {berkdb_open -btree} -env $env \
+ $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db_ident] TRUE
+ set identid [$db_ident rpcid]
+
+ set db_ident2 [eval {berkdb_open -btree} -env $env \
+ $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_ident2] TRUE
+ set ident2id [$db_ident2 rpcid]
+
+ set db_diff1 [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db_diff1] TRUE
+ set diff1id [$db_diff1 rpcid]
+
+ set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_diff2] TRUE
+ set diff2id [$db_diff2 rpcid]
+
+ set db_diff [eval {berkdb_open -unknown} -env $env -rdonly \
+ $testfile1]
+ error_check_good dbopen [is_valid_db $db_diff] TRUE
+ set diffid [$db_diff rpcid]
+
+ set db_diff2a [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_diff2a] TRUE
+ set diff2aid [$db_diff2a rpcid]
+
+ error_check_good ident:id $dbid $identid
+ error_check_good ident2:id $db2id $ident2id
+ error_check_bad diff:id $dbid $diffid
+ error_check_bad diff2:id $db2id $diffid
+ error_check_bad diff3:id $diff2id $diffid
+ error_check_bad diff4:id $diff1id $diffid
+ error_check_good diff2a:id $diff2id $diff2aid
+
+ error_check_good db_close [$db_ident close] 0
+ error_check_good db_close [$db_ident2 close] 0
+ error_check_good db_close [$db_diff close] 0
+ error_check_good db_close [$db_diff1 close] 0
+ error_check_good db_close [$db_diff2 close] 0
+ error_check_good db_close [$db_diff2a close] 0
+ error_check_good db_close [$db2 close] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ } res]
+ if { $status != 0 } {
+ puts $res
+ }
+ tclkill $dpid
+}
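
What rpc005 checks is that the server reuses one internal handle for clients whose open configuration is identical and allocates a fresh one when any flag differs; the Tcl rpcid method exposes that handle ID. A condensed sketch of the comparison, assuming an RPC server is already running and that home and rpc_server are set as in the tests above:

	# Identically configured handles share one server-side handle ID.
	set env1 [berkdb_env -create -home $home -server $rpc_server -txn]
	set env2 [berkdb_env -home $home -server $rpc_server -txn]

	# A handle opened with different flags gets its own ID.
	set env3 [berkdb_env -home $home -server $rpc_server -txn nosync]

	puts "identical config, same id: [string equal [$env1 rpcid] [$env2 rpcid]]"	;# expect 1
	puts "different config, same id: [string equal [$env1 rpcid] [$env3 rpcid]]"	;# expect 0

	$env3 close
	$env2 close
	$env1 close
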
diff --git a/db-4.8.30/test/rpc006.tcl b/db-4.8.30/test/rpc006.tcl
new file mode 100644
index 0000000..47ed452
--- /dev/null
+++ b/db-4.8.30/test/rpc006.tcl
@@ -0,0 +1,77 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rpc006
+# TEST Test RPC server and multiple operations to server.
+# TEST Make sure the server doesn't deadlock itself, but
+# TEST returns DEADLOCK to the client.
+proc rpc006 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global rpc_svc
+ source ./include.tcl
+
+ puts "Rpc006: RPC server + multiple operations"
+ puts "Rpc006: Using $rpc_svc"
+ cleanup $testdir NULL
+ set dpid [rpc_server_start]
+ puts "\tRpc006.a: Started server, pid $dpid"
+
+ #
+ # Wrap the test in a catch statement so we can still kill
+ # the rpc server even if the test fails.
+ #
+ set status [catch {
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+
+ puts "\tRpc006.b: Creating environment"
+
+ set testfile "rpc006.db"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set db [eval {berkdb_open -auto_commit -create -btree \
+ -mode 0644} -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRpc006.c: Create competing transactions"
+ set txn1 [$env txn]
+ set txn2 [$env txn]
+ error_check_good txn [is_valid_txn $txn1 $env] TRUE
+ error_check_good txn [is_valid_txn $txn2 $env] TRUE
+ set key1 "key1"
+ set key2 "key2"
+ set data1 "data1"
+ set data2 "data2"
+
+ puts "\tRpc006.d: Put with both txns to same page. Deadlock"
+ set ret [$db put -txn $txn1 $key1 $data1]
+ error_check_good db_put $ret 0
+ set res [catch {$db put -txn $txn2 $key2 $data2} ret]
+ error_check_good db_put2 $res 1
+ error_check_good db_putret [is_substr $ret DB_LOCK_DEADLOCK] 1
+ error_check_good txn_commit [$txn1 commit] 0
+
+ puts "\tRpc006.e: Retry after commit."
+ set res [catch {$db put -txn $txn2 $key2 $data2} ret]
+ error_check_good db_put2 $res 0
+ error_check_good db_putret $ret 0
+ error_check_good txn_commit [$txn2 commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ } res]
+ if { $status != 0 } {
+ puts $res
+ }
+ tclkill $dpid
+}
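
rpc006 verifies that the server resolves the conflict by returning DB_LOCK_DEADLOCK to one client rather than blocking both. The usual client-side response is to abort the losing transaction, which releases its locks, and retry. A generic retry sketch in the same Tcl API; the proc name put_with_retry is made up for illustration.

	# Retry a transactional put until it succeeds or fails with a
	# non-deadlock error.
	proc put_with_retry { env db key data {max 5} } {
		for { set i 0 } { $i < $max } { incr i } {
			set txn [$env txn]
			if { [catch {$db put -txn $txn $key $data} ret] == 0 } {
				$txn commit
				return 0
			}
			# On deadlock, abort to drop this txn's locks and retry.
			$txn abort
			if { ![string match "*DB_LOCK_DEADLOCK*" $ret] } {
				error $ret
			}
		}
		error "put of $key still deadlocked after $max attempts"
	}
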
diff --git a/db-4.8.30/test/rsrc001.tcl b/db-4.8.30/test/rsrc001.tcl
new file mode 100644
index 0000000..40cc3fd
--- /dev/null
+++ b/db-4.8.30/test/rsrc001.tcl
@@ -0,0 +1,215 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rsrc001
+# TEST Recno backing file test. Try different patterns of adding
+# TEST records and making sure that the corresponding file matches.
+proc rsrc001 { } {
+ source ./include.tcl
+
+ puts "Rsrc001: Basic recno backing file writeback tests"
+
+ # We run this test essentially twice, once with a db file
+ # and once without (an in-memory database).
+ set rec1 "This is record 1"
+ set rec2 "This is record 2 This is record 2"
+ set rec3 "This is record 3 This is record 3 This is record 3"
+ set rec4 [replicate "This is record 4 " 512]
+
+ foreach testfile { "$testdir/rsrc001.db" "" } {
+
+ cleanup $testdir NULL
+
+ if { $testfile == "" } {
+ puts "Rsrc001: Testing with in-memory database."
+ } else {
+ puts "Rsrc001: Testing with disk-backed database."
+ }
+
+ # Create backing file for the empty-file test.
+ set oid1 [open $testdir/rsrc.txt w]
+ fconfigure $oid1 -translation binary
+ close $oid1
+
+ puts "\tRsrc001.a: Put to empty file."
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set txn ""
+
+ set ret [eval {$db put} $txn {1 $rec1}]
+ error_check_good put_to_empty $ret 0
+ error_check_good db_close [$db close] 0
+
+ # Now fill out the backing file and create the check file.
+ set oid1 [open $testdir/rsrc.txt a]
+ set oid2 [open $testdir/check.txt w]
+ fconfigure $oid1 -translation binary
+ fconfigure $oid2 -translation binary
+
+ # This one was already put into rsrc.txt.
+ puts $oid2 $rec1
+
+ # These weren't.
+ puts $oid1 $rec2
+ puts $oid2 $rec2
+ puts $oid1 $rec3
+ puts $oid2 $rec3
+ puts $oid1 $rec4
+ puts $oid2 $rec4
+ close $oid1
+ close $oid2
+
+ puts -nonewline "\tRsrc001.b: Read file, rewrite last record;"
+ puts " write it out and diff"
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record; replace it (but we won't change it).
+ # Then close the file and diff the two files.
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last [llength [lindex $rec 0]] 2
+ set key [lindex [lindex $rec 0] 0]
+ set data [lindex [lindex $rec 0] 1]
+
+ # Get the last record from the text file
+ set oid [open $testdir/rsrc.txt]
+ fconfigure $oid -translation binary
+ set laststr ""
+ while { [gets $oid str] != -1 } {
+ set laststr $str
+ }
+ close $oid
+ error_check_good getlast $data $laststr
+
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good replace_last $ret 0
+
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good \
+ Rsrc001:diff($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+
+ puts -nonewline "\tRsrc001.c: "
+ puts "Append some records in tree and verify in file."
+ set oid [open $testdir/check.txt a]
+ fconfigure $oid -translation binary
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record $i" $i]
+ puts $oid $rec
+ incr key
+ set ret [eval {$db put} $txn {-append $rec}]
+ error_check_good put_append $ret $key
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.d: Append by record number"
+ set oid [open $testdir/check.txt a]
+ fconfigure $oid -translation binary
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record (set 2) $i" $i]
+ puts $oid $rec
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+ }
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.e: Put beyond end of file."
+ set oid [open $testdir/check.txt a]
+ fconfigure $oid -translation binary
+ for {set i 1} {$i < 10} {incr i} {
+ puts $oid ""
+ incr key
+ }
+ set rec "Last Record"
+ puts $oid $rec
+ incr key
+
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+
+ puts "\tRsrc001.f: Put beyond end of file, after reopen."
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rec "Last record with reopen"
+ puts $oid $rec
+
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno_with_reopen $ret 0
+
+ puts "\tRsrc001.g:\
+ Put several beyond end of file, after reopen with snapshot."
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644\
+ -snapshot -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rec "Really really last record with reopen"
+ puts $oid ""
+ puts $oid ""
+ puts $oid ""
+ puts $oid $rec
+
+ incr key
+ incr key
+ incr key
+ incr key
+
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno_with_reopen $ret 0
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.h: Verify proper syncing of changes on close."
+ error_check_good Rsrc001:db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -source $testdir/rsrc.txt} $testfile]
+ set oid [open $testdir/check.txt a]
+ fconfigure $oid -translation binary
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record $i" $i]
+ puts $oid $rec
+ set ret [eval {$db put} $txn {-append $rec}]
+ # Don't bother checking return; we don't know what
+ # the key number is, and we'll pick up a failure
+ # when we compare.
+ }
+ error_check_good Rsrc001:db_close [$db close] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good Rsrc001:diff($testdir/{rsrc,check}.txt) $ret 0
+ }
+}
+
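The mechanism rsrc001 exercises is the recno backing file: the flat text file named with -source supplies the initial records, one per line, and db sync or db close writes records added or changed through the database back to that file. A minimal sketch of the round trip, with hypothetical file names:

	# Seed the backing text file with one record per line.
	set fd [open backing.txt w]
	puts $fd "first record"
	puts $fd "second record"
	close $fd

	# Open a recno database that uses the text file as its source.
	set db [berkdb_open -create -mode 0644 -recno -source backing.txt mydb.db]

	# Record numbers are the keys; adding record 3 appends a line on sync.
	$db put 3 "third record"
	$db sync
	$db close

	# backing.txt now holds three lines: the original two plus the new one.
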
diff --git a/db-4.8.30/test/rsrc002.tcl b/db-4.8.30/test/rsrc002.tcl
new file mode 100644
index 0000000..cbb0c62
--- /dev/null
+++ b/db-4.8.30/test/rsrc002.tcl
@@ -0,0 +1,65 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rsrc002
+# TEST Recno backing file test #2: test of set_re_delim. Specify a backing
+# TEST file with colon-delimited records, and make sure they are correctly
+# TEST interpreted.
+proc rsrc002 { } {
+ source ./include.tcl
+
+ puts "Rsrc002: Alternate variable-length record delimiters."
+
+ # We run this test essentially twice, once with a db file
+ # and once without (an in-memory database).
+ foreach testfile { "$testdir/rsrc002.db" "" } {
+
+ cleanup $testdir NULL
+
+ # Create the starting files
+ set oid1 [open $testdir/rsrc.txt w]
+ set oid2 [open $testdir/check.txt w]
+ puts -nonewline $oid1 "ostrich:emu:kiwi:moa:cassowary:rhea:"
+ puts -nonewline $oid2 "ostrich:emu:kiwi:penguin:cassowary:rhea:"
+ close $oid1
+ close $oid2
+
+ if { $testfile == "" } {
+ puts "Rsrc002: Testing with in-memory database."
+ } else {
+ puts "Rsrc002: Testing with disk-backed database."
+ }
+
+ puts "\tRsrc002.a: Read file, verify correctness."
+ set db [eval {berkdb_open -create -mode 0644 -delim 58 \
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record; replace it (but we won't change it).
+ # Then close the file and diff the two files.
+ set txn ""
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -first]
+ error_check_good get_first $rec [list [list 1 "ostrich"]]
+ set rec [$dbc get -next]
+ error_check_good get_next $rec [list [list 2 "emu"]]
+
+ puts "\tRsrc002.b: Write record, verify correctness."
+
+ eval {$dbc get -set 4}
+ set ret [$dbc put -current "penguin"]
+ error_check_good dbc_put $ret 0
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ error_check_good \
+ Rsrc002:diff($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+ }
+}
diff --git a/db-4.8.30/test/rsrc003.tcl b/db-4.8.30/test/rsrc003.tcl
new file mode 100644
index 0000000..4c4b1f2
--- /dev/null
+++ b/db-4.8.30/test/rsrc003.tcl
@@ -0,0 +1,178 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rsrc003
+# TEST Recno backing file test. Try different patterns of adding
+# TEST records and making sure that the corresponding file matches.
+proc rsrc003 { } {
+ source ./include.tcl
+ global fixed_len
+
+ puts "Rsrc003: Basic recno backing file writeback tests fixed length"
+
+ # We run this test essentially twice, once with a db file
+ # and once without (an in-memory database).
+ #
+	# Each pass is run twice as well, once with small and once with
+	# big fixed-length records.
+ set rec1 "This is record 1"
+ set rec2 "This is record 2"
+ set rec3 "This is record 3"
+ set bigrec1 [replicate "This is record 1 " 512]
+ set bigrec2 [replicate "This is record 2 " 512]
+ set bigrec3 [replicate "This is record 3 " 512]
+
+ set orig_fixed_len $fixed_len
+ set rlist {
+ {{$rec1 $rec2 $rec3} "small records" }
+ {{$bigrec1 $bigrec2 $bigrec3} "large records" }}
+
+ foreach testfile { "$testdir/rsrc003.db" "" } {
+
+ foreach rec $rlist {
+ cleanup $testdir NULL
+
+ set recs [lindex $rec 0]
+ set msg [lindex $rec 1]
+
+ # Create the starting files
+ # Note that for the rest of the test, we are going
+ # to append a LF when we 'put' via DB to maintain
+ # file structure and allow us to use 'gets'.
+ set oid1 [open $testdir/rsrc.txt w]
+ set oid2 [open $testdir/check.txt w]
+ fconfigure $oid1 -translation binary
+ fconfigure $oid2 -translation binary
+ foreach record $recs {
+ set r [subst $record]
+ set fixed_len [string length $r]
+ puts $oid1 $r
+ puts $oid2 $r
+ }
+ close $oid1
+ close $oid2
+
+ set reclen [expr $fixed_len + 1]
+ if { $reclen > [string length $rec1] } {
+ set repl 512
+ } else {
+ set repl 2
+ }
+ if { $testfile == "" } {
+ puts \
+"Rsrc003: Testing with in-memory database with $msg."
+ } else {
+ puts \
+"Rsrc003: Testing with disk-backed database with $msg."
+ }
+
+ puts -nonewline \
+ "\tRsrc003.a: Read file, rewrite last record;"
+ puts " write it out and diff"
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -len $reclen -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record; replace it (don't change it).
+ # Then close the file and diff the two files.
+ set txn ""
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor \
+ [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last [llength [lindex $rec 0]] 2
+ set key [lindex [lindex $rec 0] 0]
+ set data [lindex [lindex $rec 0] 1]
+
+ # Get the last record from the text file
+ set oid [open $testdir/rsrc.txt]
+ fconfigure $oid -translation binary
+ set laststr ""
+ while { [gets $oid str] != -1 } {
+ append str \12
+ set laststr $str
+ }
+ close $oid
+ error_check_good getlast $data $laststr
+
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good replace_last $ret 0
+
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good \
+ diff1($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+
+ puts -nonewline "\tRsrc003.b: "
+ puts "Append some records in tree and verify in file."
+ set oid [open $testdir/check.txt a]
+ fconfigure $oid -translation binary
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "This is New Record $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ incr key
+ set ret [eval {$db put} $txn {-append $rec}]
+ error_check_good put_append $ret $key
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff2($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc003.c: Append by record number"
+ set oid [open $testdir/check.txt a]
+ fconfigure $oid -translation binary
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "New Record (set 2) $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+ }
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff3($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts \
+"\tRsrc003.d: Verify proper syncing of changes on close."
+ error_check_good Rsrc003:db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -len $reclen -source $testdir/rsrc.txt} $testfile]
+ set oid [open $testdir/check.txt a]
+ fconfigure $oid -translation binary
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "New Record (set 3) $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ set ret [eval {$db put} $txn {-append $rec}]
+				# Don't bother checking the return; we don't
+				# know what the key number is, and we'll pick
+				# up a failure when we compare.
+ }
+ error_check_good Rsrc003:db_close [$db close] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff5($testdir/{rsrc,check}.txt) $ret 0
+ }
+ }
+ set fixed_len $orig_fixed_len
+ return
+}
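
For fixed-length records the -len value has to cover the newline that keeps the backing file line-oriented, which is why the loops above set reclen to fixed_len + 1 and append \12 to everything written through the database. A small sketch of that arithmetic, with hypothetical file names:

	set rec "0123456789"				;# 10 data bytes per record
	set reclen [expr {[string length $rec] + 1}]	;# + 1 for the newline

	set fd [open fixed.txt w]
	fconfigure $fd -translation binary
	puts $fd $rec		;# writes the 10 bytes plus \n, i.e. reclen bytes
	close $fd

	# -len must match the on-disk record size, newline included.
	set db [berkdb_open -create -mode 0644 -recno \
	    -len $reclen -source fixed.txt fixed.db]

	# Records stored through the database carry their own trailing newline
	# so the backing file stays line-structured after a sync.
	$db put 2 "abcdefghij\12"
	$db sync
	$db close
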
diff --git a/db-4.8.30/test/rsrc004.tcl b/db-4.8.30/test/rsrc004.tcl
new file mode 100644
index 0000000..56618e3
--- /dev/null
+++ b/db-4.8.30/test/rsrc004.tcl
@@ -0,0 +1,51 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST rsrc004
+# TEST Recno backing file test for EOF-terminated records.
+proc rsrc004 { } {
+ source ./include.tcl
+
+ foreach isfixed { 0 1 } {
+ cleanup $testdir NULL
+
+ # Create the backing text file.
+ set oid1 [open $testdir/rsrc.txt w]
+ if { $isfixed == 1 } {
+ puts -nonewline $oid1 "record 1xxx"
+ puts -nonewline $oid1 "record 2xxx"
+ } else {
+ puts $oid1 "record 1xxx"
+ puts $oid1 "record 2xxx"
+ }
+ puts -nonewline $oid1 "record 3"
+ close $oid1
+
+ set args "-create -mode 0644 -recno -source $testdir/rsrc.txt"
+ if { $isfixed == 1 } {
+ append args " -len [string length "record 1xxx"]"
+ set match "record 3 "
+ puts "Rsrc004: EOF-terminated recs: fixed length"
+ } else {
+ puts "Rsrc004: EOF-terminated recs: variable length"
+ set match "record 3"
+ }
+
+ puts "\tRsrc004.a: Read file, verify correctness."
+ set db [eval berkdb_open $args "$testdir/rsrc004.db"]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record
+ set dbc [eval {$db cursor} ""]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last $rec [list [list 3 $match]]
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/db-4.8.30/test/scr001/chk.code b/db-4.8.30/test/scr001/chk.code
new file mode 100644
index 0000000..4b706ee
--- /dev/null
+++ b/db-4.8.30/test/scr001/chk.code
@@ -0,0 +1,38 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure that the code samples in the documents build.
+
+d=../../docs_src
+
+[ -d $d ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+exitv=0
+for i in `find $d -name '*.cs' | sed '/\/ref_xml\//d'`; do
+ echo " compiling $i"
+ sed -e 's/m4_include(\(.*\))/#include <\1>/g' \
+ -e 's/m4_[a-z]*[(\[)]*//g' \
+ -e 's/(\[//g' \
+ -e '/argv/!s/])//g' \
+ -e 's/dnl//g' \
+ -e 's/__GT__/>/g' \
+ -e 's/__LB__/[/g' \
+ -e 's/__LT__/</g' \
+ -e 's/__RB__/]/g' < $i > t.c
+ if cc -pthread -Wall -Werror -I.. t.c ../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: unable to compile $i"
+ exitv=1
+ fi
+done
+
+exit $exitv
diff --git a/db-4.8.30/test/scr002/chk.def b/db-4.8.30/test/scr002/chk.def
new file mode 100644
index 0000000..16001fd
--- /dev/null
+++ b/db-4.8.30/test/scr002/chk.def
@@ -0,0 +1,67 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure we haven't forgotten to add any interfaces
+# to the Windows libdb.def file.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/build_windows/libdb.def
+t1=__1
+t2=__2
+
+exitv=0
+
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/^__/d' \
+ -e '/^;/d' |
+ sort > $t1
+
+egrep __P $d/dbinc_auto/ext_prot.in |
+ sed '/^[a-z]/!d' |
+ awk '{print $2}' |
+ sed -e 's/^\*//' \
+ -e '/db_env_set_func_\(free\|malloc\|realloc\)/p' \
+ -e '/db_env_set_func_/d' |
+ sed '/^__/d' | sort > $t2
+
+if cmp -s $t1 $t2 ; then
+ :
+else
+ echo "<<< libdb.def >>> DB include files"
+ diff $t1 $t2
+ echo "FAIL: missing items in libdb.def file."
+ exitv=1
+fi
+
+# Check to make sure we don't have any extras in the libdb.def file.
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/__db_global_values/d' |
+ sed -e '/__db_C*/d' > $t1
+
+for i in `cat $t1`; do
+ if egrep $i $d/*/*.c > /dev/null; then
+ :
+ else
+ echo "$f: $i not found in DB sources"
+ fi
+done > $t2
+
+test -s $t2 && {
+ cat $t2
+ echo "FAIL: found unnecessary items in libdb.def file."
+ exitv=1
+}
+
+exit $exitv
diff --git a/db-4.8.30/test/scr003/chk.define b/db-4.8.30/test/scr003/chk.define
new file mode 100644
index 0000000..52bf421
--- /dev/null
+++ b/db-4.8.30/test/scr003/chk.define
@@ -0,0 +1,106 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure that all #defines are actually used.
+# Check to make sure that all #defines start in column 1.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+t1=__1
+t2=__2
+t3=__3
+
+find $d -name '*.c' -o -name '*.cpp' |
+ sed -e '/\/php_db4\//d' |
+ xargs egrep '^[ ][ ]*#' > $t1
+test -s $t1 && {
+ echo "FAIL: found #defines with leading white space:"
+ cat $t1
+ exit 1
+}
+
+egrep '^#define' $d/dbinc/*.h $d/dbinc/*.in |
+ sed -e '/db_185.in/d' -e '/queue.h/d' |
+ awk '{print $2}' |
+ sed -e '/^AI_CANONNAME/d' \
+ -e '/^AI_NUMERICHOST/d' \
+ -e '/^B_DELETE/d' \
+ -e '/^B_MAX/d' \
+ -e '/^CHECK_THREAD/d' \
+ -e '/^DB_BTREEOLDVER/d' \
+ -e '/^DB_DEGREE_2/d' \
+ -e '/^DB_HASHOLDVER/d' \
+ -e '/^DB_LOCKVERSION/d' \
+ -e '/^DB_LOG_PERM_42_44/d' \
+ -e '/^DB_LOG_RESEND_42_44/d' \
+ -e '/^DB_MAX_PAGES/d' \
+ -e '/^DB_PAGE_QUEUE_LEN/d' \
+ -e '/^DB_QAMOLDVER/d' \
+ -e '/^DB_RETURNS_A_KEY_HASH/d' \
+ -e '/^DB_SPARE_FLAG/d' \
+ -e '/^DB_TXNVERSION/d' \
+ -e '/^DB_UNUSED/d' \
+ -e '/^DEFINE_DB_CLASS/d' \
+ -e '/^FHASH/d' \
+ -e '/^HASH_UNUSED/d' \
+ -e '/^HPUX_MUTEX_PAD/d' \
+ -e '/^LOG_OP/d' \
+ -e '/^MAX_ID/d' \
+ -e '/^MAXHOSTNAMELEN/d' \
+ -e '/^MINFILL/d' \
+ -e '/^MUTEX_FIELDS/d' \
+ -e '/^MUTEX_LOCK_PARTITION/d' \
+ -e '/^MUTEX_UNLOCK_PARTITION/d' \
+ -e '/^NAME_TO_SEQUENCE/d' \
+ -e '/^NCACHED2X/d' \
+ -e '/^NCACHED30/d' \
+ -e '/^PAIR_MASK/d' \
+ -e '/^P_16_COPY/d' \
+ -e '/^P_32_COPY/d' \
+ -e '/^P_32_SWAP/d' \
+ -e '/^P_64_COPY/d' \
+ -e '/^P_64_SWAP/d' \
+ -e '/^P_TO_UINT16/d' \
+ -e '/^QPAGE_CHKSUM/d' \
+ -e '/^QPAGE_NORMAL/d' \
+ -e '/^QPAGE_SEC/d' \
+ -e '/^SIZEOF_PAGE/d' \
+ -e '/^TAILQ_/d' \
+ -e '/^UINT64_FMT/d' \
+ -e '/^UINT64_MAX/d' \
+ -e '/^VM_PAGESIZE/d' \
+ -e '/^WRAPPED_CLASS/d' \
+ -e '/^_WIN32_WINNT/d' \
+ -e '/^__BIT_TYPES_DEFINED__/d' \
+ -e '/^__DBC_INTERNAL/d' \
+ -e '/^__STDC__/d' \
+ -e '/^__lock_locker_hash/d' \
+ -e '/^i_/d' \
+ -e '/_H_/d' \
+ -e 's/(.*//' | sort > $t1
+
+find $d -name '*.c' -o -name '*.cpp' > $t2
+for i in `cat $t1`; do
+ if egrep -w $i `cat $t2` > /dev/null; then
+ :;
+ else
+ f=`egrep -l "#define.*$i" $d/dbinc/*.h $d/dbinc/*.in |
+ sed 's;\.\.\/\.\.\/dbinc/;;' | tr -s "[:space:]" " "`
+ echo "FAIL: $i: $f"
+ fi
+done | sort -k 2 > $t3
+
+test -s $t3 && {
+ cat $t3
+ echo "FAIL: found unused #defines"
+ exit 1
+}
+
+exit $exitv
diff --git a/db-4.8.30/test/scr004/chk.javafiles b/db-4.8.30/test/scr004/chk.javafiles
new file mode 100644
index 0000000..9c7ed83
--- /dev/null
+++ b/db-4.8.30/test/scr004/chk.javafiles
@@ -0,0 +1,30 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure we haven't forgotten to add any Java files to the list
+# of source files in the Makefile.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/dist/Makefile.in
+
+t1=__1
+t2=__2
+
+find $d/java $d/examples_java -name \*.java -print |
+ sed -e 's/^.*\///' | sort -u > $t1
+tr ' \t' '\n' < $f | sed -e '/\.java$/!d' -e 's/^.*\///' | sort -u > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< java source files >>> Makefile"
+ diff $t1 $t2
+ exit 1
+}
+
+exit 0
diff --git a/db-4.8.30/test/scr005/chk.nl b/db-4.8.30/test/scr005/chk.nl
new file mode 100644
index 0000000..522ddbe
--- /dev/null
+++ b/db-4.8.30/test/scr005/chk.nl
@@ -0,0 +1,114 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure that there are no trailing newlines in __db_err calls.
+
+d=../..
+
+[ -f $d/README ] || {
+ echo "FAIL: chk.nl can't find the source directory."
+ exit 1
+}
+
+cat << END_OF_CODE > t.c
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdio.h>
+
+int chk(FILE *, char *);
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ FILE *fp;
+ int exitv;
+
+ for (exitv = 0; *++argv != NULL;) {
+ if ((fp = fopen(*argv, "r")) == NULL) {
+ fprintf(stderr, "%s: %s\n", *argv, strerror(errno));
+ return (1);
+ }
+ if (chk(fp, *argv))
+ exitv = 1;
+ (void)fclose(fp);
+ }
+ return (exitv);
+}
+
+int
+chk(fp, name)
+ FILE *fp;
+ char *name;
+{
+ int ch, exitv, line, q;
+
+ exitv = 0;
+ for (ch = 'a', line = 1;;) {
+ if ((ch = getc(fp)) == EOF)
+ return (exitv);
+ if (ch == '\n') {
+ ++line;
+ continue;
+ }
+ if (!isspace(ch)) continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != 'd') continue;
+ if ((ch = getc(fp)) != 'b') continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != 'e') continue;
+ if ((ch = getc(fp)) != 'r') continue;
+ if ((ch = getc(fp)) != 'r') continue;
+ if ((ch = getc(fp)) != '(') continue;
+ while ((ch = getc(fp)) != '"') {
+ if (ch == EOF)
+ return (exitv);
+ if (ch == '\n')
+ ++line;
+ }
+ while ((ch = getc(fp)) != '"')
+ switch (ch) {
+ case EOF:
+ return (exitv);
+ case '\\n':
+ ++line;
+ break;
+ case '.':
+ if ((ch = getc(fp)) != '"')
+ ungetc(ch, fp);
+ else {
+ fprintf(stderr,
+ "%s: <period> at line %d\n", name, line);
+ exitv = 1;
+ }
+ break;
+ case '\\\\':
+ if ((ch = getc(fp)) != 'n')
+ ungetc(ch, fp);
+ else if ((ch = getc(fp)) != '"')
+ ungetc(ch, fp);
+ else {
+ fprintf(stderr,
+ "%s: <newline> at line %d\n", name, line);
+ exitv = 1;
+ }
+ break;
+ }
+ }
+ return (exitv);
+}
+END_OF_CODE
+
+cc t.c -o t
+if ./t $d/*/*.[ch] $d/*/*.cpp $d/*/*.in ; then
+ :
+else
+ echo "FAIL: found __db_err calls ending with periods/newlines."
+ exit 1
+fi
+
+exit 0
diff --git a/db-4.8.30/test/scr006/chk.offt b/db-4.8.30/test/scr006/chk.offt
new file mode 100644
index 0000000..62b991a
--- /dev/null
+++ b/db-4.8.30/test/scr006/chk.offt
@@ -0,0 +1,57 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Make sure that no off_t's have snuck into the release.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t=__1
+
+egrep -w off_t $d/*/*.[ch] $d/*/*.in |
+sed -e "/#undef off_t/d" \
+ -e "/build_brew\//d" \
+ -e "/build_wince\//d" \
+ -e "/build_windows\//d" \
+ -e "/db_env_set_func_ftruncate/d" \
+ -e "/db_env_set_func_pread/d" \
+ -e "/db_env_set_func_pwrite/d" \
+ -e "/db_env_set_func_seek/d" \
+ -e "/env_register.c/d" \
+ -e "/j_ftruncate/d" \
+ -e "/j_pread/d" \
+ -e "/j_pwrite/d" \
+ -e "/j_seek/d" \
+ -e "/mp_fopen.c:.*can't use off_t's here/d" \
+ -e "/mp_fopen.c:.*size or type off_t's or/d" \
+ -e "/mp_fopen.c:.*where an off_t is 32-bits/d" \
+ -e "/mutex\/tm.c:/d" \
+ -e "/off_t because its size depends on/d" \
+ -e "/os_ext.h/d" \
+ -e "/os_flock.c/d" \
+ -e "/zerofill.c:.*stat_offset/d" \
+ -e "/zerofill.c:.*write_offset/d" \
+ -e "/os_map.c:.*(off_t)0))/d" \
+ -e "/os_method.c/d" \
+ -e "/os_rw.c:/d" \
+ -e "/os_seek.c:.*off_t offset;/d" \
+ -e "/os_seek.c:.*offset = /d" \
+ -e "/os_truncate.c:.*off_t offset;/d" \
+ -e "/os_truncate.c:.*off_t stat_offset;/d" \
+ -e "/os_truncate.c:.*offset = /d" \
+ -e "/test_perf\/perf_misc.c:/d" \
+ -e "/test_server\/dbs.c:/d" \
+ -e "/test_vxworks\/vx_mutex.c:/d" > $t
+
+test -s $t && {
+ cat $t
+ echo "FAIL: found questionable off_t usage"
+ exit 1
+}
+
+exit 0
diff --git a/db-4.8.30/test/scr007/chk.proto b/db-4.8.30/test/scr007/chk.proto
new file mode 100644
index 0000000..5e125d2
--- /dev/null
+++ b/db-4.8.30/test/scr007/chk.proto
@@ -0,0 +1,44 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure that prototypes are actually needed.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+t3=__3
+
+egrep '__P' $d/dbinc_auto/*.h |
+ sed -e 's/[ ][ ]*__P.*//' \
+ -e 's/^.*[ *]//' \
+ -e '/__db_cprint/d' \
+ -e '/__db_lprint/d' \
+ -e '/__db_noop_log/d' \
+ -e '/__db_prnpage/d' \
+ -e '/__db_txnlist_print/d' \
+ -e '/__db_util_arg/d' \
+ -e '/__ham_func2/d' \
+ -e '/__ham_func3/d' \
+ -e '/_print$/d' \
+ -e '/_read$/d' > $t1
+
+find $d -name '*.in' -o -name '*.[ch]' -o -name '*.cpp' > $t2
+for i in `cat $t1`; do
+ c=$(egrep -Hlw $i $(cat $t2) | wc -l)
+ echo "$i: $c"
+done | egrep ' 1$' > $t3
+
+test -s $t3 && {
+ cat $t3
+ echo "FAIL: found unnecessary prototypes."
+ exit 1
+}
+
+exit 0
diff --git a/db-4.8.30/test/scr008/chk.pubdef b/db-4.8.30/test/scr008/chk.pubdef
new file mode 100644
index 0000000..0b74741
--- /dev/null
+++ b/db-4.8.30/test/scr008/chk.pubdef
@@ -0,0 +1,189 @@
+#!/bin/sh -
+#
+# Reconcile the list of public defines with the man pages and the Java files.
+
+d=../..
+docs=$d/docs_src
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+p=$d/dist/pubdef.in
+
+exitv=0
+
+# The m4 doc checks below are commented out; m4 was removed for 4.8.
+# TODO: add test for csharp const
+#cat <<END_OF_TEXT
+#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+#Check that pubdef.in has everything listed in m4.links.
+#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+#END_OF_TEXT
+
+#f=$docs/m4/m4.links
+#sed -n \
+# -e 's/^\$1, \(DB_[^,]*\).*/\1/p' \
+# -e d < $f |
+#while read name; do
+# if `egrep -w "$name" $p > /dev/null`; then
+# :
+# else
+# echo "$f: $name is missing from $p"
+# exitv=1
+# fi
+#done
+
+#cat <<END_OF_TEXT
+#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+#Check that m4.links has everything listed in pubdef.in.
+#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+#END_OF_TEXT
+
+#f=$docs/m4/m4.links
+#sed '/^#/d' $p |
+#while read name isdoc isinc isjava; do
+# if `egrep -w "^.1, $name" $f > /dev/null`; then
+# [ "X$isdoc" != "XD" ] && {
+# echo "$name should not appear in $f"
+# exitv=1
+# }
+# else
+# [ "X$isdoc" = "XD" ] && {
+# echo "$name does not appear in $f"
+# exitv=1;
+# }
+# fi
+#done
+
+cat <<END_OF_TEXT
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Check that pubdef.in has everything listed in db.in plus api_flags.in.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+END_OF_TEXT
+
+cat $d/dbinc/db.in $d/dbinc_auto/api_flags.in | sed -n \
+ -e 's/^#define[ ]*\(DB_[A-Z_0-9][A-Z_0-9]*\).*/\1/p' \
+ -e 's/^[ ]*\(DB_[A-Z_]*\)=[0-9].*/\1/p' \
+ -e d |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "db.in/api_flags.in: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+cat <<END_OF_TEXT
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Check that api_flags.in plus db.in has everything listed in pubdef.in.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+END_OF_TEXT
+
+sed '/^#/d' $p |
+while read name isdoc isinc isjava iscsharp; do
+ if `egrep -w "#define[ ]$name|[ ][ ]*$name=[0-9][0-9]*" \
+ $d/dbinc/db.in $d/dbinc_auto/api_flags.in > /dev/null`; then
+ [ "X$isinc" != "XI" ] && {
+ echo "$name should not appear in db.in/api_flags.in"
+ exitv=1
+ }
+ else
+ [ "X$isinc" = "XI" ] && {
+ echo "$name does not appear in db.in/api_flags.in"
+ exitv=1
+ }
+ fi
+done
+
+cat <<END_OF_TEXT
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Check that pubdef.in has everything listed in DbConstants.java.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+END_OF_TEXT
+
+j=$d/java/src/com/sleepycat/db
+f=$j/internal/DbConstants.java
+sed -n -e 's/.*int[ ]\([^ ]*\).*;/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+cat <<END_OF_TEXT
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Check that DbConstants.java has everything listed in pubdef.in.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+END_OF_TEXT
+
+f=$j/internal/DbConstants.java
+sed '/^#/d' $p |
+while read name isdoc isinc isjava iscsharp; do
+ if `egrep -w "int[ ]$name =" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+cat <<END_OF_TEXT
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Check that all constants in pubdef.in are wrapped by the Java API.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+END_OF_TEXT
+
+# Strip out Javadoc comments
+t=__1
+cat $j/*.java $d/rpc_server/java/*.java \
+ $j/internal/Db.java $j/internal/DbEnv.java \
+ $j/internal/db_javaJNI.java | sed '/\/\*\*/,/\*\// d' > $t
+
+sed '/^#/d' $p |
+while read name isdoc isinc isjava iscsharp; do
+ if `egrep -w "$name" $t > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in the Java API"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in the Java API"
+ exitv=1
+ }
+ fi
+done
+
+cat <<END_OF_TEXT
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+Check that all constants in pubdef.in are wrapped by the Java native layer.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+END_OF_TEXT
+
+sed '/^#/d' $p |
+while read name isdoc isinc isjava iscsharp; do
+ if `egrep -w "$name" $d/libdb_java/db_java_wrap.c > /dev/null`; then
+ [ "X$isjava" != "XN" ] && [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in the Java native layer"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XN" ] && {
+ echo "$name does not appear in the Java native layer"
+ exitv=1
+ }
+ fi
+done
+
+exit $exitv
diff --git a/db-4.8.30/test/scr009/chk.srcfiles b/db-4.8.30/test/scr009/chk.srcfiles
new file mode 100644
index 0000000..3812320
--- /dev/null
+++ b/db-4.8.30/test/scr009/chk.srcfiles
@@ -0,0 +1,50 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure we haven't forgotten to add any files to the list
+# of source files Windows uses to build its dsp files.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/dist/srcfiles.in
+t1=__1
+t2=__2
+
+sed -e '/^[ #]/d' \
+ -e '/^db_server_clnt.c/d' \
+ -e '/^db_server_svc.c/d' \
+ -e '/^db_server_xdr.c/d' \
+ -e '/^examples_c\/csv\/csv_local.c/d' \
+ -e '/^gen_db_server.c/d' \
+ -e '/^$/d' < $f |
+ awk '{print $1}' > $t1
+find $d -type f |
+ sed -e 's/^\.\.\/\.\.\///' \
+ -e '/^build[^_]/d' \
+ -e '/^dist\//d' \
+ -e '/^libdb_java\/java_stat_auto.c/d' \
+ -e '/^mod_db4\//d' \
+ -e '/^perl\//d' \
+ -e '/^php_db4\//d' \
+ -e '/^rpc_server\/c\/gen_db_server.c/d' \
+ -e '/^test\//d' \
+ -e '/^test_erlang/d' \
+ -e '/^test_server/d' \
+ -e '/^test_thread/d' \
+ -e '/^test_vxworks/d' |
+ egrep '\.c$|\.cpp$|\.def$|\.rc$' |
+ sort > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< srcfiles.in >>> existing files"
+ diff $t1 $t2
+ exit 1
+}
+
+exit 0
diff --git a/db-4.8.30/test/scr010/chk.str b/db-4.8.30/test/scr010/chk.str
new file mode 100644
index 0000000..18e757b
--- /dev/null
+++ b/db-4.8.30/test/scr010/chk.str
@@ -0,0 +1,42 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check spelling in quoted strings.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__t1
+
+sed -e '/^#include/d' \
+ -e '/"/!d' \
+ -e 's/^[^"]*//' \
+ -e 's/%s/ /g' \
+ -e 's/[^"]*$//' \
+ -e 's/\\[nt]/ /g' \
+ `find $d -name '*.[ch]' -o -name '*.cpp' -o -name '*.java' |
+ sed -e '/\/perl\//d' -e '/\/test_erlang\//d'` |
+spell | sort -u | comm -23 /dev/stdin spell.ok > $t1
+
+test -s $t1 && {
+ cat $t1
+ echo "FAIL: found questionable spelling in strings."
+ exit 1
+}
+
+egrep -h '/\* | \* ' \
+ `find $d -name '*.[ch]' -o -name '*.cpp' | sed -e '/\/perl\//d' -e '/\/test_erlang\//d'` |
+spell | sort -u | comm -23 /dev/stdin spell.ok > $t1
+
+test -s $t1 && {
+ cat $t1
+ echo "FAIL: found questionable spelling in comments."
+ exit 1
+}
+
+exit 0
diff --git a/db-4.8.30/test/scr010/spell.ok b/db-4.8.30/test/scr010/spell.ok
new file mode 100644
index 0000000..3eafae3
--- /dev/null
+++ b/db-4.8.30/test/scr010/spell.ok
@@ -0,0 +1,4903 @@
+AAAA
+AAAAAAAAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAIIIIIIIIIIIIIIIIDNOAA
+AAAAAAAAAAAAAAAABCKLLDDDDDEEEEEEEEEEEEEEEEEEEEAAAAAAAADD
+AAAAGGGGGGGHAFBFAAFFAAQPIIJJIIIIIIIIIIIIIIIIII
+ABCDEFabcdef
+ABS
+ABSMODE
+ABc
+ACK
+ACKS
+ADDR
+ADDRINFO
+AEE
+AEEApplet
+AES
+AIX's
+AJVX
+ALG
+ALLBACK
+ALLDB
+ALLOC
+ALLZEROES
+AMs
+API
+APIs
+APP
+APPMALLOC
+APPNAME
+APPREC
+ARG
+ARGS
+ASSOC
+ASYNC
+ATOI
+ATOL
+AUTOCOMMIT
+AUTOINIT
+AUTOREMOVE
+AV
+AccessExample
+AccesssExample
+Acflmo
+Acknowledgement
+Acknowledgements
+Acks
+Aclmop
+Aclop
+Adata
+AddEnumConstant
+Addr
+Ahlm
+Ahm
+AllFields
+AllowAddSecondary
+AllowChangeKeyMetadata
+AllowFieldAddDelete
+AllowFieldTypeChanges
+AllowPopulate
+AllowPriKeyField
+AnnotRemoved
+AnnotationDefault
+Antoon
+Api
+App
+AppDispatch
+AppendRecnoTest
+Applock
+Args
+ArithmeticException
+Arntzen
+ArrayIndex
+ArrayList
+ArrayNameChange
+AssertionError
+AssociateCallbacks
+AssociateTest
+AttachCurrentThread
+Aug
+BASETEST
+BBBB
+BC
+BCFILprRsvVxX
+BCc
+BDB
+BDBXXXXX
+BDBXXXXXX
+BEGID
+BH
+BH's
+BI
+BII
+BINTERNAL
+BITSPERBLOCK
+BKEYDATA
+BLKSIZE
+BNF
+BOTHC
+BOVERFLOW
+BR
+BREW's
+BSIZE
+BTCOMPARE
+BTMETA
+BTREE
+BTREEMAGIC
+BTREEMETA
+BTREEOLDVER
+BTREEVERSION
+BUF
+BULKOVF
+Backoff
+Barreto
+BaseClass
+Bc
+Bdata
+BerkeleyDB
+BigDecimal
+BigInt
+BigInteger
+BindingSpeedTest
+Bosselaers
+BtRecExample
+Btree
+BtreeComparator
+BtreeStat
+BtreeStats
+Btrees
+BulkAccessExample
+BulkAccessNIOExample
+ByteArray
+ByteArrayBinding
+ByteArrayFormat
+ByteArrayInputStream
+ByteArrayOutputStream
+ByteBuffer
+C'est
+CACHESIZE
+CALLBACK
+CALLPGIN
+CANONNAME
+CBC
+CC
+CCCC
+CCYYMMDDhhmm
+CD
+CDATA
+CDB
+CDCDEEEEEEEEEEEEEEEEEEEEBABABBBBDCFFFGGGEDCDCDCDCDCDCDCDCD
+CDCEEEEDDDDDDDCDCDCEFEFDDEEFFDEDEEEBDDBBDDDDDDCCCCCCCCEFED
+CDS
+CDSGROUP
+CDSGroup
+CDdFILTVvX
+CFB
+CFILpRsv
+CFLprsvVxX
+CFf
+CFh
+CHARKEY
+CHGPG
+CHILDCOMMIT
+CHILDINFO
+CHILDINFOs
+CHK
+CHKPNT
+CHKPOINT
+CHKSUM
+CKP
+CKPLSN
+CL
+CLASSPATH
+CLEARLEN
+CLOSEFP
+CLR
+CLRDBC
+CLpsvxX
+CMD
+CMP
+CNT
+COMPAT
+COMPQUIET
+CONCAT
+CONCATDATAKEY
+CONCATKEYDATA
+CONF
+CONFIG
+CONNFAIL
+CONST
+CRTL
+CRYPTO
+CSV
+CT
+CTIME
+CTX
+CXX
+CacheFilePriority
+CacheFileStats
+CacheStats
+Cachesize
+Callback
+Callbacks
+Cc
+CdFILTvX
+Cf
+Ch
+ClassCastException
+ClassCatalog
+ClassCatalogDB
+ClassInfo
+ClassNotFoundException
+ClassRemoved
+ClientData
+CloseHandle
+Cmd
+Cmp
+Co
+CollectionTest
+CompactStat
+CompactStats
+CompanyD
+CompositeKeyField
+Config
+ConstantValue
+ConvertExample
+CopyObjBytes
+CreateEvent
+CreateFile
+CreateFileForMapping
+CreateFileMapping
+CreateHashEntry
+CreateInstance
+Crypto
+CurrentTransaction
+Cygwin
+DATAHOME
+DB's
+DBC
+DBCursor
+DBDIR
+DBENV
+DBGPRINTF
+DBHOME
+DBINFO
+DBINIT
+DBLOCAL
+DBLOG
+DBM
+DBMETA
+DBMETASIZE
+DBMSG
+DBNAME
+DBP
+DBP's
+DBREG
+DBREP
+DBS
+DBSDIR
+DBT
+DBT's
+DBTCL
+DBTList
+DBTYPE
+DBTs
+DBa
+DBaa
+DBaz
+DBba
+DBcursor
+DBs
+DBz
+DDCDCDEEEEEEEEEEFEEEEEEDDEEDDEE
+DDDEEEEEEEEEEEEEEEEEEEEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+DEADFILE
+DECLS
+DEF
+DEFMINKEYPAGE
+DELAYCLIENT
+DELNO
+DGREE
+DIAG
+DIR
+DIRECTIO
+DIRENT
+DIST
+DJ
+DLFCN
+DLL
+DLLs
+DNS
+DOALL
+DONOTINDEX
+DONTEXTEND
+DONTLOCK
+DS
+DST
+DSYNC
+DTL
+DUP
+DUPFIRST
+DUPID
+DUPLAST
+DUPMASTER
+DUPOK
+DUPONLY
+DUPS
+DUPSET
+DUPSORT
+DWORD
+DataBinding
+DataBuffer
+DataCursor
+DataDb
+DataEnvironment
+DataFormat
+DataIndex
+DataInput
+DataInputStream
+DataOutput
+DataOutputStream
+DataStore
+DataThang
+DataType
+DataView
+DatabaseEntry
+DatabaseException
+DatabaseTest
+DatabaseType
+Db
+DbAppDispatch
+DbAppendRecno
+DbAssociate
+DbAttachImpl
+DbBtreeCompare
+DbBtreePrefix
+DbBtreeStat
+DbClient
+DbCompact
+DbCompactStat
+DbCount
+DbCursor
+DbDeadlockException
+DbDelete
+DbDispatcher
+DbDupCompare
+DbEnv
+DbEnvFeedback
+DbEnvFeedbackHandler
+DbErrcall
+DbErrorHandler
+DbException
+DbFeedback
+DbFeedbackHandler
+DbField
+DbGet
+DbGetFlags
+DbGetOpenFlags
+DbGetjoin
+DbHash
+DbHashStat
+DbInfoDelete
+DbKeyRange
+DbLock
+DbLockNotGrantedException
+DbLockRequest
+DbLockStat
+DbLogStat
+DbLogc
+DbLsn
+DbMemoryException
+DbMpoolFStat
+DbMpoolFile
+DbMpoolFileStat
+DbMpoolStat
+DbMultiple
+DbMultipleDataIterator
+DbMultipleIterator
+DbMultipleKeyDataIterator
+DbMultipleRecnoDataIterator
+DbOpen
+DbOutputStreamErrcall
+DbPanicHandler
+DbPreplist
+DbPut
+DbQueueStat
+DbRecord
+DbRecoveryInit
+DbRemove
+DbRename
+DbRepStat
+DbRepTransport
+DbRunRecoveryException
+DbSecondaryKeyCreate
+DbSequence
+DbServer
+DbStat
+DbTestUtil
+DbTruncate
+DbTxn
+DbTxnRecover
+DbTxnStat
+DbUpgrade
+DbUtil
+DbVerify
+DbXA
+DbXAResource
+DbXid
+Dbc
+DbcDup
+DbcGet
+DbcPut
+Dbm
+DbmCommand
+Dbp
+Dbs
+Dbt
+Dbt's
+Dbts
+Dde
+DeadLockException
+Deadfile
+DeadlockException
+Debian
+DeferredWrite
+DeleteEnumConstant
+DeleteFile
+DeleteInfo
+DeleteSuperclass
+DeletedEntity
+DeletedKeys
+DeletedPersist
+Deref'ing
+Dir
+DirectBuffer
+DisallowChangeKeyRelate
+DisallowChangeProxyFor
+DisallowCompositeKeyField
+DisallowDeleteProxyFor
+DisallowNonKeyField
+DisallowPriKeyField
+DisallowSecKeyField
+Dup
+DuplicateComparator
+Dups
+EACCES
+EADDRINUSE
+EADDRNOTAVAIL
+EAFNOSUPPORT
+EAGAIN
+EALREADY
+EAUTH
+EBADF
+EBADFILENAME
+EBADMSG
+EBADRPC
+EBADSEEKPOS
+EBUSY
+ECANCELED
+ECB
+ECHILD
+ECONNABORTED
+ECONNREFUSED
+ECONNRESET
+EDEADLK
+EDESTADDRREQ
+EDIRIOCTL
+EDIRNOEXISTS
+EDIRNOTEMPTY
+EDOM
+EDOOFUS
+EDQUOT
+EEXIST
+EEXISTS
+EFAULT
+EFBIG
+EFFFFFFFFGGFFFGGFFFEEFGFGFEEEEEEEEEEEEEEEEEEEEDEDEDDDDD
+EFILEEOF
+EFILEEXISTS
+EFILENOEXISTS
+EFILEOPEN
+EFSFULL
+EFTYPE
+EGENCHG
+EGENUPDATE
+EHOSTDOWN
+EHOSTUNREACH
+EID
+EIDRM
+EIDS
+EILSEQ
+EINPROGRESS
+EINTR
+EINVAL
+EINVALIDOPERATION
+EIO
+EIRT
+EISCONN
+EISDIR
+EIi
+EJUSTRETURN
+ELAST
+ELECTINIT
+ELECTVOTE
+ELOOP
+EMFILE
+EMLINK
+EMSG
+EMSGSIZE
+EMULTIHOP
+ENAMETOOLONG
+ENEEDAUTH
+ENETDOWN
+ENETRESET
+ENETUNREACH
+ENOATTR
+ENOBUFS
+ENODEV
+ENOENT
+ENOERROR
+ENOEXEC
+ENOIOCTL
+ENOLCK
+ENOLINK
+ENOMEDIA
+ENOMEM
+ENOMEMORY
+ENOMSG
+ENOPROTOOPT
+ENOSPC
+ENOSYS
+ENOTBLK
+ENOTCONN
+ENOTDIR
+ENOTEMPTY
+ENOTSOCK
+ENOTSUP
+ENOTTY
+ENT
+ENV
+ENV's
+ENVLINK
+ENVVAR
+ENXIO
+EOF
+EOFException
+EOPNOTSUPP
+EOUTOFNODES
+EOVERFLOW
+EPERM
+EPFNOSUPPORT
+EPG
+EPGNO
+EPHASE
+EPIPE
+EPRINT
+EPRINTed
+EPROCLIM
+EPROCUNAVAIL
+EPROGMISMATCH
+EPROGUNAVAIL
+EPROTO
+EPROTONOSUPPORT
+EPROTOTYPE
+ERANGE
+EREMOTE
+ERESTART
+EROFS
+ERPCMISMATCH
+ESHUTDOWN
+ESOCKTNOSUPPORT
+ESPIPE
+ESRCH
+ESTALE
+ESTD
+ETIME
+ETIMEDOUT
+ETOOMANYREFS
+ETXTBSY
+EUSERS
+EVENTITEMS
+EWOULDBLOCK
+EXCL
+EXDEV
+EXT
+Eefgh
+Eefh
+Egen
+Elp
+EnclosingMethod
+EncryptTest
+Endian
+EnhancedAccessor
+EntityBinding
+EntityConverter
+EntityInput
+EntityOutput
+EntityStore
+EntityToPersist
+Enum
+EnumInit
+EnumNetworkEvents
+Env
+EnvAttr
+EnvExample
+EnvGetEncryptFlags
+EnvIdReset
+EnvInfoDelete
+EnvLsnReset
+EnvOpen
+EnvRemove
+EnvSetErrfile
+EnvSetErrpfx
+EnvSetFlags
+EnvTest
+EnvVerbose
+Equidistributed
+Errcall
+Errfile
+Errno
+ErrorFunc
+ErrorSetup
+Errpfx
+EvalObjv
+EventFunc
+EventNotify
+EventType
+EvolveClasses
+ExampleDatabaseLoad
+ExampleDatabaseRead
+ExampleStore
+ExceptionUnwrapper
+ExceptionWrapper
+ExceptionWrapperTest
+Exp
+Externalizable
+FALLTHROUGH
+FBSDID
+FCHMOD
+FCLOSE
+FCNTL
+FCONTROL
+FD
+FDATASYNC
+FF
+FGETC
+FGETS
+FH
+FH's
+FILEDONE
+FILEID
+FILELIST
+FILENO
+FILEOPEN
+FILEOPS
+FILEWRITTEN
+FIXEDLEN
+FIXLEN
+FIXME
+FLD
+FLDID
+FMAP
+FML
+FMT
+FN
+FNAME
+FNAMES
+FNFE
+FOPEN
+FOREACH
+FP
+FREELIST
+FREESPACE
+FS
+FST
+FSTAT
+FSTATI
+FTRUNCATE
+FTYPE
+FULLSYNC
+FWRITE
+FastInputStream
+FastOutputStream
+FastOutputStreamTest
+FatalRecover
+Fchg
+Fd
+Ff
+Fh
+Fid
+FieldAddAndConvert
+FileIndexHigh
+FileIndexLow
+FileNotFound
+FileNotFoundException
+Fileinfo
+FindHashEntry
+FlushFileBuffers
+Fneeded
+FooImp
+Foreach
+ForeignKeyDatabase
+ForeignKeyDeleteAction
+ForeignKeyIndex
+ForeignKeyNullifer
+ForeignKeyTest
+ForeignMultiKeyNullifier
+FormatMessageA
+FreeBSD
+FreeBSD's
+FreeFunc
+FreeList
+Friedl
+GCC
+GEOM
+GETADDRINFO
+GETALL
+GETCWD
+GETDYNAMIC
+GETENV
+GETNAME
+GETOPT
+GETRECNO
+GETRUSAGE
+GETTIME
+GETTIMEOFDAY
+GETUID
+GETZIP
+GID
+Gb
+Gcc
+Gentles
+Get's
+GetByteArray
+GetByteArrayFromObj
+GetConfig
+GetDiskFreeSpace
+GetDiskFreeSpaceEx
+GetEnvironmentVariable
+GetFileInformationByHandle
+GetFlags
+GetFlagsList
+GetGlobPrefix
+GetHashValue
+GetIndexFromObj
+GetInfo
+GetIntFromObj
+GetJavaVM
+GetJoin
+GetLastError
+GetLockDetect
+GetLongFromObj
+GetLsn
+GetOpenFlag
+GetSystemTime
+GetSystemTimeAsFileTime
+GetTimeout
+GetUInt
+GetUnsignedIntFromObj
+GetVerbose
+GetVersion
+Gh
+Gizmo
+GlobalRefs
+GoDone
+GotRange
+HANDSOFF
+HASHC
+HASHC's
+HASHHDR
+HASHINSERT
+HASHLOOKUP
+HASHMAGIC
+HASHMETA
+HASHOLDVER
+HASHREMOVE
+HASHTAB
+HASHVERSION
+HCommand
+HDR
+HDRs
+HEURCOM
+HEURHAZ
+HEURMIX
+HEURRB
+HFS
+HKEYDATA
+HMAC
+HMETA
+HOFFDUP
+HOFFPAGE
+HOFFSET
+HOLDELECTION
+HPPA
+HPUX
+HSEARCH
+HSTAT
+HSTRERROR
+Harbison
+HashCompareTest
+HashStats
+Hashtable
+HelloDatabaseWorld
+Hmm
+Holder's
+Hsearch
+IA
+IAFTER
+IBEFORE
+IBTREE
+ICURRENT
+ID's
+IDLETIMEOUT
+IDs
+IFF
+IFILE
+IFILEMGR
+IFJDCS
+IFile
+IIL
+IIZ
+IIZLjava
+IL
+ILOCK
+ILo
+ILprR
+INADDR
+INDX
+INFOTYPE
+INI
+INIT
+INITED
+INITENV
+INITSPIN
+INMEM
+INMEMORY
+INORDER
+INREPELECT
+INT
+INTTYPES
+INVAL
+INVALIDID
+IOException
+IOExceptionWrapper
+IOSIZE
+IOVECS
+IP
+IPC
+IPPROTO
+IPv
+IR
+IREAD
+IRECNO
+IRGRP
+IRIX
+IROTH
+IRUSR
+ISALPHA
+ISBIG
+ISDIGIT
+ISDUP
+ISHELL
+ISPERM
+ISPRINT
+ISSET
+ISSPACE
+IV's
+IW
+IWGRP
+IWOTH
+IWR
+IWRITE
+IWUSR
+IXGRP
+IXOTH
+IXUSR
+Ick
+Ids
+Ik
+IllegalArgumentException
+IllegalStateException
+IncompatibleClassException
+IncrRefCount
+Index's
+IndexOutOfBoundsException
+Init
+Initialise
+Inline
+InnerClasses
+InsertSuperclass
+IntegrityConstraintException
+Interp
+InventoryDB
+IsAlive
+Istmp
+ItemNameIndexDB
+Itemname
+IterDeadlockTest
+JDB
+JE
+JEDB
+JHB
+JJ
+JKL
+JNI
+JNIEnv
+JNIs
+JOINCUR
+JOINENV
+JVM
+JZ
+JanFebMarAprMayJunJulAugSepOctNovDec
+JavaIO
+JavaRPCServer
+JoinTest
+KEYDATA
+KEYEMPTY
+KEYEXIST
+KEYFIRST
+KEYGROUP
+KEYGRP
+KEYLAST
+KEYLEN
+KL
+Kerberos
+KeyCreator
+KeyExtractor
+KeyField
+KeyRange
+KeyRangeException
+KeyRangeTest
+Krinsky
+LANGLVL
+LASTCKP
+LBTREE
+LCK
+LDF
+LDUP
+LEAFCHAIN
+LEAFLEVEL
+LEAFSEEN
+LF
+LFNAME
+LFPREFIX
+LG
+LGPL
+LIBNSL
+LL
+LOC
+LOCALTIME
+LOCKDOWN
+LOCKOBJ
+LOCKREGION
+LOCKREQ
+LOCKTAB
+LOCKTIMEOUT
+LOCKVERSION
+LOGC
+LOGFILEID
+LOGMAGIC
+LOGOLDVER
+LOGP
+LOGREADY
+LOGSONLY
+LOGVERSION
+LORDER
+LP
+LRECNO
+LRECNODUP
+LRU
+LRUness
+LSN
+LSN's
+LSNfile
+LSNoffset
+LSNs
+LSTAT
+LV
+LWARX
+LWP
+LWZ
+Landon
+Lastp
+Lcom
+LineNumberTable
+ListIterator
+ListObjAppendElement
+Ljava
+Ll
+LocalIterator
+LocalVariableTable
+LocalVariableTypeTable
+LockDetect
+LockDetectMode
+LockExample
+LockFileEx
+LockGet
+LockMode
+LockNotGrantedException
+LockOperation
+LockRequest
+LockRequestMode
+LockStat
+LockStats
+LockTimeout
+LockVec
+Lockfhp
+Lockfile
+LogArchive
+LogCompare
+LogFile
+LogFlush
+LogGet
+LogPut
+LogRegister
+LogSequenceNumber
+LogStat
+LogStats
+Logc
+LogcGet
+LpRsS
+LprRsS
+Lsn
+LtoR
+MALLOC
+MAMAMIA
+MARGO
+MASTERELECT
+MAXARGS
+MAXBQUALSIZE
+MAXBTREELEVEL
+MAXFIELD
+MAXGTRIDSIZE
+MAXID
+MAXINFOSIZE
+MAXLOCKS
+MAXMMAPSIZE
+MAXNR
+MAXPATHLEN
+MAXSIZE
+MAXSIZEONPAGE
+MAXTIMEOUT
+MAXWRITE
+MC
+MDups
+MEM
+MEMCMP
+MEMCPY
+MEMMAPPED
+MEMMOVE
+MEMP
+METADIRTY
+MFT
+MINCACHE
+MINFO
+MINIT
+MINLOCKS
+MINPAGECACHE
+MINWRITE
+MIPS
+MKS
+MLOCK
+MMAP
+MMDDhhmm
+MNO
+MP
+MPE
+MPFARRAY
+MPOOL
+MPOOLFILE
+MPOOLFILE's
+MPOOLFILEs
+MPREG
+MPREGs
+MPROTECT
+MSB
+MSC
+MSDN
+MSEM
+MSG
+MSGBUF
+MSGS
+MSHUTDOWN
+MSLEEP
+MSTR
+MSVC
+MT
+MULTIVERSION
+MUNLOCK
+MUNMAP
+MUTEX
+MUTEXMGR
+MUTEXREGION
+MUTEXes
+MVCC
+MVS
+MYDIRECTORY
+Makoto
+Malloc
+MapEntry
+MapViewOfFile
+Margo
+MarshalledEnt
+MarshalledEntityBinding
+MarshalledKey
+MarshalledKeyBinding
+MarshalledObject
+MarshalledTupleData
+MarshalledTupleEntry
+MarshalledTupleKeyEntity
+Matsumoto
+MaxID
+Maxid
+Maxkey
+Mb
+Mbytes
+McIlroy's
+MemoryException
+Mempool
+Mersenne
+Metadata
+Metapage
+MinGW
+Minkey
+Misc
+MixColumn
+MkDir
+Mobilus
+MonTueWedThuFriSatSun
+MoveFile
+MoveFileEx
+Mp
+MpGet
+MpInfoDelete
+MpStat
+MpSync
+MpTrickle
+Mpool
+MpoolExample
+Mpoolfile
+Msg
+MsgType
+Msgcall
+Msgfile
+MultiKeyCreator
+Multihop
+Multiversion
+Mutex
+MutexStats
+Mutexes
+Mv
+MyClass
+MyDbs
+MyEntity
+MySubtype
+MyType
+NB
+NBUCKET
+NCACHE
+NCACHED
+NDBM
+NDIR
+NDX
+NEEDSPLIT
+NEEDSWAP
+NEWCLIENT
+NEWFILE
+NEWMASTER
+NEWSITE
+NEXTFILE
+NEXTINFO
+NG
+NL
+NOARCHIVE
+NOAUTO
+NOAUTOINIT
+NOBUFFER
+NOCOPY
+NODENAME
+NODUP
+NODUPDATA
+NODUPS
+NOERROR
+NOFILE
+NOHEADER
+NOKEY
+NOLOCK
+NOLOCKING
+NOLSN
+NOMIGRATE
+NOMMAP
+NOMORE
+NOORDERCHK
+NOOVERWRITE
+NOPANIC
+NOPROMOTE
+NORECNUM
+NORUN
+NOSERVER
+NOSORT
+NOSYNC
+NOTA
+NOTEXIST
+NOTFOUND
+NOTGRANTED
+NOTLOGGED
+NOTPERM
+NOTREACHED
+NOTSET
+NOTUSED
+NOTYPE
+NOTZERO
+NOWAIT
+NP
+NRECS
+NREGION
+NS
+NSLOTS
+NT
+NTFS
+NULL'ing
+NULLXID
+NULLing
+NULLs
+NULs
+NUM
+NUMERICHOST
+NUMWRITES
+NameToInfo
+NameToPtr
+Ndbc
+Ndbm
+NdbmOpen
+NewEntityName
+NewInfo
+NewName
+NewStringObj
+Newfile
+Nikunj
+Nishimura
+NoFields
+NoMutation
+NoP
+NonKeyField
+NonKeys
+NoqV
+NqV
+Nr
+NrV
+NsV
+NsVv
+Nuff
+NullClassCatalog
+NullPointerException
+NullTransactionRunner
+Num
+Nxt
+OBJ
+ODDFILESIZE
+OFFDUP
+OFFPAGE
+OFM
+OLDVER
+OLDVERSION
+ONC
+OOB
+OP
+OPD
+OPENFILES
+OPFLAGS
+OPS
+OR'd
+ORDERCHKONLY
+OSF
+OSO
+OSS
+OUTFILE
+OVFL
+Obj
+ObjectInputStream
+ObjectOutputStream
+ObjectStreamClass
+ObjectToPrimitive
+ObjectToSubtype
+ObjectToUnrelatedOther
+ObjectToUnrelatedSimple
+Objs
+Offpage
+Ol
+Ooops
+OpenFile
+OpenFileMapping
+OpenServer
+OperationStatus
+Ops
+Optimised
+OutOfMemoryError
+OutputStream
+PAGEDONE
+PAGEINFO
+PAGEINFOs
+PAGELIST
+PAGEs
+PANIC'd
+PARAMS
+PARENT's
+PBNYC
+PF
+PG
+PGDEF
+PGINFO
+PGNO
+PGSIZE
+PHP
+PID
+PKG
+PLIST
+PMerge
+POPENFILES
+POSIX
+POSTDESTROY
+POSTLOG
+POSTLOGMETA
+POSTOPEN
+POSTSYNC
+PPC
+PR
+PREAD
+PREPLIST
+PREV
+PRI
+PRINTF
+PRINTFOOTER
+PRINTHEADER
+PROT
+PSIZE
+PSTAT
+PTHREAD
+PTHREADS
+PWRITE
+PaRisc
+PackedIntegerTest
+Pagesize
+Pagesizes
+Params
+Part's
+PartBinding
+PartData
+PartKey
+PartKeyBinding
+PartValue
+PartialGetTest
+Paulo
+Perl
+PersistToEntity
+PersonStore
+Pg
+PgInit
+PgIsset
+Pgin
+Pgno
+Phong
+Pid
+PlatformSDK
+Posix
+PowerPC
+Pre
+PreparedTransaction
+Prev
+PriKey
+PrimaryKey
+PrimaryKeyAssigner
+PrimitiveToObject
+Proc
+ProxiedClass
+Pthread
+Ptr
+PtrToInfo
+QAM
+QAMDATA
+QAMMAGIC
+QAMMETA
+QAMOLDVER
+QAMVERSION
+QMETA
+QNX
+QPAGE
+QSORT
+QUIESCED
+QUOTESERVER
+Qsort
+QueueStats
+RB
+RBBASE
+RBCOMMFAIL
+RBDEADLOCK
+RBEND
+RBINTEGRITY
+RBOTHER
+RBPROTO
+RBROLLBACK
+RBTIMEOUT
+RBTRANSIENT
+RCLOSE
+RDONLY
+RDWRMASTER
+READLOCK
+READONLY
+REALLOC
+REALLOC'ed
+REC
+RECLEN
+RECNO
+RECNOSYNC
+RECNUM
+RECORDCOUNT
+RECOVERYTEST
+REGENV
+REGINFO
+REGIONs
+REGMAINT
+RELEN
+RELIANTUNIX
+REM
+RENAMEMAGIC
+REPCTL
+REPFLAGS
+REPLICATIONMGR
+REPLOCKED
+REPMGR
+REPQUOTE
+REPSTART
+REPTEST
+REPVERSION
+REQ
+REQs
+REREQUEST
+REUSEADDR
+REVERSECONCAT
+REVERSEDATA
+REVSPLITOFF
+RIJNDAEL
+RINTERNAL
+RIW
+RLOCK
+RM
+RMDIR
+RMERR
+RMFAIL
+RMID
+RMNAMESZ
+RMW
+RMs
+ROP
+RPC
+RPCCLIENT
+RPCExample
+RPCGEN
+RPRINT
+RT
+RTTarget
+RUNLOG
+RUNRECOVERY
+RUSAGE
+RandCommand
+RangeExceeded
+RangeKeyNotEqual
+RangeNotFound
+RawObject
+RawStore
+RawType
+ReadOnly
+Realloc
+Rec
+Recno
+Recnos
+RecordNumberBinding
+RecordNumberFormat
+RecoveryOperation
+Refcnt
+Refcount
+ReflectPermission
+Reinit
+RenamedEntity
+Renamer
+RepConfig
+RepConfigInfo
+RepElect
+RepElectResult
+RepFlush
+RepGetConfig
+RepLease
+RepLimit
+RepMgr
+RepMgrStat
+RepNoarchiveTimeout
+RepProcessMessage
+RepQuoteExample
+RepRequest
+RepStart
+RepStat
+RepSync
+RepTransport
+RepVersion
+ReplicationConfig
+ReplicationDuplicateMasterException
+ReplicationHandleDeadException
+ReplicationHoldElectionException
+ReplicationHostAddress
+ReplicationJoinFailureException
+ReplicationLeaseExpiredException
+ReplicationLeaseTimeoutException
+ReplicationLockoutException
+ReplicationManagerAckPolicy
+ReplicationManagerSiteInfo
+ReplicationManagerStartPolicy
+ReplicationManagerStats
+ReplicationSiteUnavailableException
+ReplicationStats
+ReplicationStatus
+ReplicationTimeoutType
+Repmgr
+Repmgr's
+RepmgrConfigTest
+RepmgrElectionTest
+RepmgrStartupTest
+ResetResult
+ResolvePath
+ReturnSetup
+Rieffel
+Rijmen
+Rijndael
+Roeber
+Rp
+RpcDb
+RpcDbEnv
+RpcDbTxn
+RpcDbc
+RtoL
+RunRecoveryException
+RuntimeException
+RuntimeExceptionWrapper
+RuntimeInvisibleAnnotations
+RuntimeInvisibleParameterAnnotations
+RuntimeVisibleAnnotations
+RuntimeVisibleParameterAnnotations
+Rusage
+SC
+SCHED
+SCO
+SCO's
+SDK
+SDups
+SECS
+SEGDATA
+SEGID
+SEM
+SEMA
+SENDEVENT
+SEP
+SEQ
+SERVERPROG
+SERVERVERS
+SET's
+SETALL
+SETCURSOR
+SETFD
+SETVAL
+SGI
+SHA
+SHALLOC
+SHASH
+SHM
+SHMEM
+SHMGET
+SHQUEUE
+SIGABRT
+SIGACTION
+SIGALRM
+SIGCHLD
+SIGPIPE
+SIZEOF
+SKIPFIRSTKEY
+SKU
+SMerge
+SNPRINTF
+SORTPAGE
+SPACEONLY
+SPL
+SPLITOLD
+SPRINTF
+SS
+SSLeay
+SSZ
+STAILQ
+STARTSYNC
+STARTUPDONE
+STAT
+STATS
+STCWX
+STD
+STDC
+STDERR
+STDINT
+STDLIB
+STK
+STR
+STRCASECMP
+STRCAT
+STRCHR
+STRDUP
+STRERROR
+STRFTIME
+STRLIST
+STRNCAT
+STRNCMP
+STROFFSET
+STRRCHR
+STRSEP
+STRTOL
+STRTOUL
+STRUCT
+STWCX
+SUBDB
+SWAPBYTES
+SWIG's
+SWITCHes
+SWR
+SYMBIAN
+SYSCONF
+SYSTEMTIME
+SampleDatabase
+SampleViews
+Schlossnagle
+SecKeys
+SecondaryDeadlockTest
+SecondaryKey
+Sedgewick
+Seq
+SeqGet
+SeqGetFlags
+SeqNo
+SeqOpen
+SequenceExample
+SequenceStats
+SerialBinding
+SerialBindingTest
+SerialFormat
+SerialInput
+SerialOutput
+SerialSerialBinding
+SerialSerialKeyExtractor
+SetEndOfFile
+SetFilePointer
+SetFilePointerEx
+SetInfoData
+SetListElem
+SetListElemInt
+SetListElemWideInt
+SetListRecnoElem
+SetMultiList
+SetObjResult
+ShipmentBinding
+ShipmentByPart
+ShipmentBySupplier
+ShipmentData
+ShipmentKey
+ShipmentKeyBinding
+ShipmentValue
+Shm
+SibDup
+Signalling
+SimpleBuffer
+SimpleStoreGet
+SimpleStorePut
+Skiplist
+Skodon
+Sleepycat
+Something's
+SortedDuplicates
+SortedMap
+SortedSet
+SourceDebugExtension
+SourceFile
+Sparc
+Splitp
+Standalone
+Stat
+Stats
+Std
+Stdout
+Steele
+StoredClassCatalog
+StoredClassCatalogTest
+StoredClassCatalogTestInit
+StoredCollection
+StoredCollections
+StoredContainer
+StoredEntrySet
+StoredIterator
+StoredKeySet
+StoredList
+StoredMap
+StoredMapEntry
+StoredSortedEntrySet
+StoredSortedKeySet
+StoredSortedMap
+StoredSortedValueSet
+StoredValueSet
+StringBuffer
+StringDbt
+Subdatabase
+Subdatabases
+Subdb
+Subname
+SunOS
+SupplierBinding
+SupplierByCity
+SupplierData
+SupplierKey
+SupplierKeyBinding
+SupplierValue
+Symbian
+SystemInfo
+TAILQ
+TAS
+TCHAR
+TCL
+TCLDB
+TDS
+TESTDIR
+TESTTESTEST
+TESTXADIR
+THR
+TID
+TIMESPEC
+TLPUT
+TM
+TMASYNC
+TMENDRSCAN
+TMER
+TMERR
+TMFAIL
+TMJOIN
+TMMIGRATE
+TMMULTIPLE
+TMNOFLAGGS
+TMNOFLAGS
+TMNOMIGRATE
+TMNOWAIT
+TMONEPHASE
+TMP
+TMPDIR
+TMREGISTER
+TMRESUME
+TMSTARTRSCAN
+TMSUCCESS
+TMSUSPEND
+TMUSEASYNC
+TMs
+TODO
+TOPLEVEL
+TPC
+TPCB
+TPINIT
+TPS
+TRU
+TRUNC
+TRUNCDATA
+TS
+TSTRING
+TX
+TXN
+TXNAPP
+TXNHEAD
+TXNID
+TXNLIST
+TXNLOGREC
+TXNMGR
+TXNREGION
+TXNS
+TXNVERSION
+TXNs
+Takuji
+Tbuf
+Tcl
+Tcl's
+Teardown
+TempFolder
+TestAppendRecno
+TestAssociate
+TestCallback
+TestClassCatalog
+TestClosedDb
+TestConstruct
+TestDataBinding
+TestDbtFlags
+TestEntity
+TestEntityBinding
+TestEnv
+TestGetSetMethods
+TestKeyAssigner
+TestKeyExtractor
+TestKeyRange
+TestLockVec
+TestLogc
+TestOpenEmpty
+TestReplication
+TestRpcServer
+TestSR
+TestSameDbt
+TestSerial
+TestSimpleAccess
+TestStat
+TestStore
+TestTruncate
+TestTxn
+TestUtil
+TestUtils
+TestXAServlet
+Thang
+Thies
+ThreadId
+ThreadIdString
+ThreadOne
+ThreadTwo
+Threshhold
+Throwable
+TimeUnits
+Tmp
+Topher
+Torek
+TpcbExample
+TransactionRunner
+TransactionStats
+TransactionTest
+TransactionTests
+TransactionWorker
+Tru
+Tt
+TupleBinding
+TupleBindingTest
+TupleFormat
+TupleFormatTest
+TupleInput
+TupleInputBinding
+TupleMarshalledBinding
+TupleOrderingTest
+TupleOutput
+TupleSerialBinding
+TupleSerialDbFactory
+TupleSerialDbFactoryTest
+TupleSerialEntityBinding
+TupleSerialFactoryTest
+TupleSerialKeyExtractor
+TupleSerialMarshalledBinding
+TupleSerialMarshalledKeyExtractor
+TupleTupleBinding
+TupleTupleKeyExtractor
+TupleTupleMarshalledBinding
+TupleTupleMarshalledKeyExtractor
+Txn
+TxnCheckpoint
+TxnGuide
+TxnGuideDPL
+TxnGuideInMemory
+TxnInfoDelete
+TxnRecover
+TxnStat
+TxnTimeout
+Txnid
+Txns
+UI
+UID
+UINT
+ULONG
+UMRW
+UNAVAIL
+UNDEF
+UNDOC
+UNICODE
+UNISTD
+UNPv
+UNREF
+UOC
+UPDATEROOT
+UPREFIX
+USEC
+USERCOPY
+USERMEM
+UTF
+UTFDataFormatException
+UTS
+UVW
+UX
+Unencrypted
+Unhandled
+Unicode
+UnixLib
+UnixWare
+Unixware
+UnknownError
+UnlockFile
+UnmapViewOfFile
+Unmarshall
+UnsupportedOperationException
+UpdateConflictException
+UseBaseclass
+UseSubclass
+UtfOps
+UtfTest
+Util
+VC
+VER
+VM
+VMPAGESIZE
+VOTEs
+VRFY
+VSNPRINTF
+VTALLY
+VX
+VXWORKS
+Var
+Varargs
+Vc
+VendorDB
+VerboseConfig
+VersionMismatchException
+Vo
+Voter's
+Vv
+VvW
+VvXxZ
+Vvw
+Vx
+VxWorks
+WAITSFOR
+WAKEME
+WATCOM
+WLInitialContextFactory
+WORDLIST
+WRITECURSOR
+WRITELOCK
+WRITEOPEN
+WRNOSYNC
+WRONLY
+WSA
+WSACleanup
+WSAStartup
+WSAWaitForMultipleEvents
+WT
+WW
+WWRITE
+Waitsfor
+WebLogic
+Wikieup
+WinCE
+WinNT
+WinXP
+WithConverter
+WithDeleter
+WithRenamer
+WriteFile
+X's
+XA
+XAER
+XAException
+XAResource
+XID
+XIDDATASIZE
+XMIT
+XOR'd
+XP
+XPG
+XXX
+Xid
+XxZ
+YIELDCPU
+YY
+YYMMDDhhmm
+Yongmin
+ZED
+ZF
+Zero'd
+aValues
+aa
+aaA
+aaB
+aaC
+aaD
+aaa
+aaaaa
+aaaaaa
+aaaaaaaaaaaaaaaaaaaaaa
+aaaaab
+aaaaac
+aab
+aac
+aad
+ab
+abc
+abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq
+abcdef
+abcdefghijklmnopqrstuvwxuz
+abcdefghijklmnopqrstuvwxyz
+abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ
+abs
+abshome
+absname
+abspath
+ac
+aca
+accessor
+ack
+ack'ed
+ack's
+acknowledgement
+acknowledgements
+acks
+acplt
+activekids
+activep
+activepips
+actualData
+actualKey
+acurs
+ada
+add'l
+addAll
+addAnnotation
+addField
+addfamilylocker
+addl
+addpage
+addr
+addrem
+addrinfo
+addrlen
+addrp
+addrq
+adh
+adj
+adjindx
+adlsVv
+admin
+adv
+afterop
+ahr
+alanb
+alexg
+alfred
+alg
+algsetup
+alignp
+alldb
+alloc
+alloc'ed
+alloc'ing
+alloced
+allocs
+allpeers
+allreq
+alsVv
+alsn
+amd
+amx
+antoon
+anum
+ap
+aparts
+api
+apologise
+app
+app's
+appendRecordNumber
+appendrecnotest
+appexit
+appl
+appname
+appnotes
+apprec
+apps
+aq
+archivable
+archivedir
+areDuplicatesAllowed
+areDuplicatesOrdered
+areFormatsChanged
+areKeysRenumbered
+arg
+argp
+args
+argv
+arr
+arraycopy
+arraysz
+arw
+asap
+asites
+asprintf
+assertEquals
+associatetest
+astubble
+ata
+atoi
+atol
+att
+attr
+autoCommit
+autocommit
+autoconf
+autoconfig
+autoremove
+avg
+awk
+baaaaa
+backoff
+backpointer
+badend
+badgen
+badkey
+badnum
+bak
+bam
+bamc
+barkVolume
+barreto
+baseClass
+bb
+bba
+bbbbb
+bcopy
+bcurs
+bd
+bdb
+bdbExistingStaticBlock
+bdbGetField
+bdbIsPriKeyFieldNullOrZero
+bdbNewArray
+bdbNewInstance
+bdbReadNonKeyFields
+bdbReadPriKeyField
+bdbReadSecKeyFields
+bdbSetField
+bdbWriteNonKeyFields
+bdbWritePriKeyField
+bdbWriteSecKeyFields
+bdbcmds
+bde
+beginInclusive
+beginKey
+beq
+beql
+berkdb
+berkdbcmds
+berkeley
+bfname
+bfree
+bh
+bharray
+bhfree
+bhp
+bhwrite
+bi
+bigending
+bigint
+bigpages
+bigpair
+binding's
+bitmask
+bitmasks
+bk
+blbs
+blk
+blksize
+blockDecrypt
+blockEncrypt
+blockSize
+blockable
+blocknum
+blocksize
+bmeta
+bmp
+bndx
+bne
+bnum
+booleanValue
+bosselaers
+bostic
+bp
+bqual
+br
+broot
+bs
+bshift
+bsize
+bsizep
+bt
+btcompare
+btrec
+btree
+btreemeta
+btrees
+bucketsp
+buf
+bufp
+bufs
+bufsize
+buildpartial
+builtin
+bulkbufsize
+bulkbufsz
+bumpSize
+bval
+bylsn
+bypage
+byteLen
+byteOffset
+byteValue
+bytecode
+byteorder
+bytesExpected
+bytesToChars
+bytesize
+bytesp
+byteswap
+byteswapped
+bytevalue
+cDd
+cDuVv
+cEelmNrtVZ
+cEelmNrtVxZ
+caaaaa
+cachemax
+cachep
+cachesize
+cachesz
+cadjust
+call's
+callback
+callback's
+callbacks
+callbacktest
+callbk
+calloc
+callocs
+callpgin
+canonname
+capi
+carray
+catalogDb
+catalogtest
+cbts
+cc
+ccclose
+ccnext
+ccset
+ccurs
+cd
+cdata
+cdb
+cdel
+cds
+cdsgroup
+ceVv
+cefVv
+cefh
+ceh
+celmNrtVZ
+celmNtV
+celmNtVZ
+cfile
+cfp
+cget
+cgetchk
+ch
+char's
+charLen
+charLength
+charOffset
+charValue
+charkey
+charset
+checkgen
+checklocker
+chgpg
+childcursor
+childinc
+childproof
+childput
+childs
+chk
+chkpoint
+chkpt
+chkspace
+chksum
+chmod
+chongo
+ci
+cip
+cipherInit
+cipherInstance
+cipherUpdateRounds
+ckp
+ckplsn
+cksum
+cl
+classCatalog
+classID
+className
+classpath
+cleandir
+clearIndexKey
+clearerr
+clib
+clientData
+clientdb
+clientrun
+clinit
+clist
+clnt
+clockskew
+closeEnv
+closeddbtest
+closeevent
+closefiles
+closehandle
+closeme
+cls
+cmap
+cmd
+cmdargs
+cmds
+cmp
+cmpwi
+cnt
+codegen
+com
+comm
+compareDuplicates
+compareproc
+comparitors
+compat
+concatdatakey
+concatkeydata
+cond
+conf
+config
+configname
+connfail
+const
+containsAll
+containsKey
+containsValue
+conv
+convprintable
+copyFrom
+copyin
+copyleft
+copymap
+copyout
+copypage
+copypair
+countElements
+countRecords
+countp
+cp
+cpage
+cpp
+cputchk
+cq
+cr
+crdel
+creat
+createflag
+crypto
+cs
+csearch
+csp
+csv
+ct
+ctime
+ctlflags
+ctp
+ctp's
+ctps
+ctx
+ctxn
+cuVv
+curadj
+curalloc
+curdata
+curfile
+curinfo
+curinval
+curlist
+curlsn
+curroffset
+curslen
+curslist
+cursp
+cursq
+curtime
+curwalk
+customerName
+cutlim
+cuz
+cxx
+cxxproc
+cxxthis
+cxxutil
+dKB
+das
+dat
+dataBinding
+dataInput
+dataOutput
+dataToObject
+databasetest
+databuf
+datalen
+datap
+datapage
+datasize
+datastr
+datastring
+datatype
+db
+dbFileName
+dbOpenFlags
+dbc
+dbc's
+dbca
+dbcb
+dbcl
+dbclear
+dbclient
+dbclose
+dbcmds
+dbcp
+dbcursor
+dbdata
+dbdel
+dbdemo
+dbdp
+dbe
+dbentry
+dbenv
+dbh
+dbinc
+dbinfo
+dbinit
+dbip
+dbj
+dbjoin
+dbkill
+dblist
+dblistlinks
+dblp
+dbm
+dbmclose
+dbmeta
+dbmfp
+dbminit
+dbmp
+dbname
+dbnamep
+dbo
+dbobj
+dbopen
+dbp
+dbp's
+dbpp
+dbprep
+dbps
+dbq
+dbrdonly
+dbreg
+dbregister
+dbremove
+dbrename
+dbs
+dbsizes
+dbsrv
+dbt
+dbta
+dbtb
+dbtp
+dbtruncate
+dbts
+dbtxn
+dbtype
+dbuf
+dbverify
+dbx
+dcursor
+dd
+dda
+ddbt
+deadfile
+deadlocker
+deadmap
+dec
+decrpyting
+def
+defcmp
+defg
+defpfx
+defto
+del
+delayclient
+deletable
+deleteAction
+delext
+delim
+delimp
+delpg
+denom
+dereffing
+des
+desc
+descheduled
+deserialize
+deserialized
+deserializing
+dest
+detectp
+dev
+df
+dft
+dh
+diff
+difflen
+difftime
+dir
+dir's
+directio
+dirent
+dirf
+dirfno
+dirfree
+dirlist
+dirname
+dirp
+dirs
+dirtyRead
+dist
+dists
+ditem
+dl
+dlbytes
+dlen
+dlfcn
+dll
+dlopen
+dm
+dname
+dndx
+doGet
+doTimeouts
+doWork
+dobj
+docRoot
+doevents
+donext
+doreq
+doubleToLongBits
+doubleValue
+dp
+dpagep
+dpages
+dpair
+dpgno
+dr
+dref
+dremoved
+dropAnnotation
+dropField
+ds
+dsearch
+dsize
+dst
+dsync
+dt
+dtab
+dtabsize
+dumptree
+dup
+dup'ed
+dupcnt
+dupcompare
+duperr
+duplicate's
+dupmaster
+dupmasters
+dupped
+dups
+dupset
+dupsort
+duptree
+duptype
+dwNumberOfProcessors
+dx
+eax
+ebuf
+edu
+efg
+efh
+egen
+eid
+eid's
+eidp
+eids
+ek
+ele
+electinit
+electsend
+electtally
+electvote
+electwait
+elem
+elp
+emailAddresses
+emap
+employerIds
+emt
+encrpyting
+encryptaes
+encryptany
+encrypttest
+endInclusive
+endKey
+endian
+endianness
+endif
+endname
+endodata
+endofile
+endpath
+endsWith
+endtime
+enqueuing
+ent
+entityBinding
+entrySet
+entryp
+enum
+enumType
+enums
+env
+env's
+envFlags
+envHome
+envcl
+envdata
+envdir
+envdp
+envid
+envip
+envlock
+envp
+envpanic
+envparent
+envreg
+envremove
+envrpcserver
+envs
+eobj
+eof
+eor
+ep
+erlangen
+errbuf
+errcall
+errfile
+errlock
+errlst
+errno
+errnum
+erroring
+errorret
+errpfx
+errstream
+errunlock
+errx
+esat
+esec
+esp
+etime
+eusec
+eval
+eventDb
+eventproc
+exactp
+exampleStore
+excl
+excxx
+exe
+exid
+exnum
+expandtab
+expectEnhanced
+expectNoClassChanges
+expr
+ext
+extentsize
+extentsizep
+externalizable
+externs
+extid
+extractIndexKey
+fN
+failcheck
+failchk
+faq
+faststat
+faultmem
+favoriteColors
+fc
+fcchk
+fchk
+fchmod
+fclose
+fcn
+fcntl
+fcreate
+fd
+fd's
+fdatasync
+fdlist
+fdlock
+fdm
+fdp
+fdupcurs
+feedback's
+ferr
+ff
+ffactor
+ffactorp
+fff
+fget
+fgetc
+fgets
+fh
+fhp
+fid
+fids
+fieldName
+fieldlist
+fieldno
+fileID
+fileIDs
+filedone
+filehandle
+fileid
+fileids
+fileinfo
+fileinit
+filelist
+filemode
+filenamep
+filenum
+fileopen
+fileops
+filestart
+fillf
+fillfactor
+fillpercent
+finalcount
+findData
+findFirst
+finddatum
+findlastckp
+finfo
+finickyLevel
+firstKey
+firstkey
+fiv
+fixup
+fixups
+flagN
+flagsp
+floatToIntBits
+floatValue
+flushcommit
+fmax
+fmethod
+fmsg
+fmt
+fn
+fnam
+fname
+fnl
+fnp
+fns
+fnum
+foo
+fopen
+forName
+foreach
+foreignStore
+foreignkey
+form's
+format's
+formatID
+fp
+fprintf
+fprobe
+fptr
+fput
+fq
+freakin
+free'd
+free'ing
+freeable
+freeaddrinfo
+freedata
+freefamilylocker
+freelist
+freelock
+freelocker
+freelst
+freeonly
+freep
+freespace
+fremove
+freq
+friedl
+fromInclusive
+fromIndex
+fromKey
+fromMapEntry
+fromValue
+frotzed
+fs
+fset
+fst
+fstat
+fstati
+fsync
+ftab
+ftruncate
+ftype
+fullName
+fullhome
+fullname
+fullnew
+func
+funcs
+fv
+fwd
+fwrite
+fzero
+gapflags
+gbytes
+gbytesp
+gc
+gc'ed
+gcc
+gcount
+gdb
+gen
+genrand
+george
+getBranchQualifier
+getBytes
+getCity
+getClass
+getClassFormat
+getCollection
+getCurrentKey
+getDBFileName
+getData
+getDbEnv
+getDbt
+getDbtString
+getDetail
+getEnvironment
+getErrno
+getField
+getFlags
+getFormatId
+getGlobalTransactionId
+getIndex
+getInstance
+getLock
+getMode
+getName
+getNext
+getObj
+getObject
+getOffset
+getOp
+getPartialLength
+getPartialOffset
+getPrimaryKeyFormat
+getPrimitiveBinding
+getProperty
+getRecno
+getRecordNumber
+getSize
+getString
+getTime
+getTimeout
+getUserBufferLength
+getValue
+getValueFormat
+getactive
+getaddr
+getaddrinfo
+getboth
+getbothc
+getckp
+getconfig
+getcwd
+getdata
+getdynamic
+getenv
+gethostbyname
+getindex
+getinfo
+getitrs
+getjoin
+getline
+getlocker
+getlong
+getname
+getnext
+getno
+getobj
+getopt
+getpageinfo
+getpid
+getrusage
+getstack
+getsubopt
+gettime
+gettimeofday
+gettingstarted
+gettype
+getuid
+getulong
+getval
+getzip
+ghi
+gi
+gid
+gm
+gmail
+gmtoff
+golive
+gonna
+gotkey
+gotta
+grep
+groupalloc
+groupgrow
+gsf
+gsg
+gsp
+gtrid
+guesspgsize
+guesstimated
+hEvent
+hamc
+handle's
+handleException
+happend
+hardcode
+hardcoding
+hasNext
+hasPrevious
+hashCode
+hashcompare
+hashcomparetest
+hashhdr
+hashinit
+hashmeta
+hashp
+hashproc
+hashtab
+hc
+hcp
+hcreate
+hdestroy
+hdr
+hdrbuf
+hdrchk
+hdrpages
+hdrs
+headMap
+headSet
+header's
+headp
+heldby
+helloworld
+hf
+hfp
+hijkl
+himark
+histdbt
+hlock
+hm
+hmac
+hmeta
+hmm
+holdl
+homep
+homeroot
+hostaddr
+hostname
+hotbackup
+hotcopy
+hotupdate
+hp
+hq
+href
+hs
+hsearch
+hstrerror
+htab
+html
+htonl
+http
+httpd
+iX
+ia
+icursor
+idShadow
+idbase
+idletimeout
+idleto
+idmap
+idnum
+idp
+ids
+idspace
+idup
+idup'ed
+iface
+ifdef
+ifdef's
+iff
+ifmt
+ifndef
+ifp
+ihold
+iitem
+ik
+ilock
+ilocks
+impl
+inc
+incfirst
+incomp
+incr
+incursor
+ind
+indexCursor
+indexKey
+indexKeyData
+indexKeyFormat
+indexKeyOutput
+indexKeys
+indexOf
+indexViews
+indexlist
+indices
+indx
+indxp
+info's
+infop
+informatik
+ini
+init
+init'ing
+inited
+initialSize
+inits
+initspin
+inlen
+inline
+inmem
+inmemdbflags
+inmemlist
+inmemory
+ino
+inode
+inorder
+inp
+inpitem
+inputOctets
+inregion
+insdel
+insertpair
+int
+intBitsToFloat
+intValue
+intel
+interp
+intial
+ints
+inttypes
+inuse
+inval
+inventoryDB
+inventorydb
+io
+ioinfo
+iopsize
+iosDevFind
+iovec
+iovecs
+ip
+ipc
+ipcs
+iq
+iread
+isAutoCommit
+isByteLen
+isCatNotDog
+isDirtyReadAllowed
+isDirtyReadEnabled
+isEmpty
+isIndexed
+isOrdered
+isTransactional
+isWriteAllowed
+isalive
+isalpha
+isbad
+isbigendian
+isdeleted
+isdigit
+isdone
+isdst
+isdup
+islease
+isolder
+isopd
+ispget
+isprint
+isroot
+isspace
+istmp
+isvalid
+isync
+itemcount
+itemname
+itemnameDB
+itemorder
+iter
+ith
+iwr
+iwrite
+java
+java's
+javaagent
+javax
+jbyte
+jc
+je
+jenv
+jhi
+jl
+jlong
+jmsjdbc
+jndi
+journaling
+jp
+jq
+jrpcgen
+jt
+jta
+kBoolean
+kByte
+kCharacter
+kComposite
+kDouble
+kFloat
+kInteger
+kLong
+kShort
+kb
+kboolean
+kbyte
+kbytes
+kchar
+kcomposite
+kdouble
+keio
+key's
+keyAssigner
+keyBinding
+keyClass
+keyExtractor
+keyFormat
+keyInput
+keyInstance
+keyLen
+keyMaterial
+keyName
+keyOutput
+keySet
+keyTypes
+keybuf
+keyexists
+keyfirst
+keyfive
+keyflag
+keyfour
+keygroup
+keygroups
+keygrp
+keylast
+keynum
+keyone
+keyp
+keyrange
+keysSize
+keysize
+keystr
+keystring
+keythree
+keytwo
+kfloat
+kgnum
+ki
+kidsp
+killid
+killinterval
+killiteration
+killtest
+kint
+klNpP
+klNprRV
+klNprRs
+klen
+klinks
+klong
+kow
+kp
+kpv
+krinsky
+ks
+kshort
+kuleuven
+lM
+lP
+lSN
+lang
+last's
+lastError
+lastIndexOf
+lastKey
+lastfile
+lastid
+lastpgno
+later's
+lbtree
+lbucket
+lbuf
+lc
+lcnt
+ld
+ldata
+ldbp
+ldbs
+ldbt
+ldbtsize
+ldcws
+ldl
+ldstub
+le
+len
+lenp
+les
+levelp
+lf
+lfhp
+lfname
+lg
+lget
+lh
+lhash
+lhi
+li
+lib
+libc
+libdb
+libfile
+libname
+libpthread
+libresolv
+libthread
+lineno
+linux
+listIterator
+listobj
+listp
+listsize
+lk
+lkrs
+ll
+lld
+llsn
+llu
+lm
+ln
+lnP
+lnsl
+loadEnvVars
+loadme
+loc
+localhost
+localtime
+lockForWrite
+lockGet
+lockVector
+lockcount
+lockdown
+locker's
+lockerid
+lockevent
+lockfhp
+lockid
+lockinfo
+lockmgr
+lockmode
+lockobj
+lockop
+lockreq
+locksteals
+lockstep
+locktimeout
+logbuf
+logbufsize
+logbufsz
+logc
+logclean
+logdir
+logend
+logfile
+logfiles
+logflush
+loggap
+logmaxset
+logmsg
+logready
+logrec
+logreq
+logset
+logsonly
+logv
+longBitsToDouble
+longValue
+longtest
+lorder
+lorderp
+lowlsn
+lp
+lpBuffer
+lpgno
+lprint
+lput
+lrand
+lref
+lrp
+lru
+lsVv
+lseek
+lsn
+lsnadd
+lsnget
+lsninit
+lsnoff
+lsnp
+lsynch
+lt
+ltm
+lu
+luB
+luGB
+luK
+luKB
+luKb
+luM
+luMB
+luMb
+lvalue
+lwarx
+lwp
+lx
+mNP
+mNs
+machid
+machlock
+machtab
+maddr
+magicno
+maintinit
+maj
+majver
+makeKey
+makecopy
+makedup
+malloc
+malloc'd
+malloc'ed
+malloc's
+mallocing
+mallocs
+manyToMany
+manyToOne
+mapEntry
+mapfile
+margo
+markdone
+markneeded
+markus
+marshalIndexKey
+marshalled
+marshalling
+matumoto
+maxKey
+maxRetries
+maxb
+maxcommitperflush
+maxhlocks
+maxhobjects
+maxid
+maxkey
+maxkeypage
+maxlockers
+maxlocks
+maxlsn
+maxlsteals
+maxn
+maxnactive
+maxnlockers
+maxnlocks
+maxnobjects
+maxnsnapshot
+maxobjects
+maxopenfd
+maxops
+maxosteals
+maxp
+maxperm
+maxpg
+maxpgno
+maxrec
+maxsites
+maxsize
+maxtimeout
+maxto
+maxtxn
+maxtxns
+maxwrite
+maxwrites
+mb
+mbp
+mbucket
+mbuf
+mbytes
+mbytesp
+md
+mday
+mdays
+mem
+membar
+memcmp
+memcmps
+memcpy
+memmove
+memp
+memset
+metachk
+metadata
+metaflags
+metagroup
+metalsn
+metapage
+metasub
+metaswap
+methodID
+mf
+mfence
+mfp
+mgr
+mgrp
+midpage
+millitm
+mincommitperflush
+minkey
+minkeyp
+minkeypage
+minlocks
+minp
+minval
+minver
+minwrite
+minwrites
+mip
+mips
+mis
+misc
+mjc
+mkdir
+mkdir's
+mkpath
+mlock
+mmap
+mmap'd
+mmap'ing
+mmapped
+mmapsize
+mmapsizep
+mmetalsn
+mmpgno
+modeFlag
+moremiddle
+mortem
+moshen
+mov
+movb
+movl
+mp
+mpf
+mpfarray
+mpfq
+mpgno
+mpip
+mpool
+mpoolfile
+mpools
+mpreg
+mprotect
+mps
+mrv
+ms
+msem
+msemaphore
+msg
+msg's
+msgadd
+msgbuf
+msgcall
+msgdbt
+msgdir
+msgfile
+msgfp
+msgs
+msgth
+msgtype
+msgv
+msize
+mswap
+mt
+mti
+mtx
+mtxmgr
+mtxp
+mtxregion
+multiversion
+munlock
+munmap
+mut
+mutex
+mutexes
+mutexlocks
+mutexp
+muxfile
+mv
+mvcc
+mvptr
+myClassDb
+myDatabaseName
+myDb
+myStore
+myclassdb
+mydatabase
+mydb
+mydrive
+mydrivexxx
+myfree
+mylock
+myobjc
+mytime
+myval
+n'th
+nFiles
+nO
+nP
+nTV
+nTt
+naborts
+nactive
+nalloc
+namelistp
+nameop
+namep
+namesp
+nano
+nargc
+nargv
+nbegins
+nbucket
+nbuckets
+nbuf
+nbytes
+ncache
+ncachep
+ncaches
+ncommit
+ncommits
+nconflicts
+ncp
+ncurs
+ncvs
+ndary
+ndata
+ndbc
+ndbm
+ndeadalloc
+ndeadlocks
+ndir
+ndowngrade
+ndx
+needswap
+neg
+nelem
+nelemp
+nelems
+nentries
+nentry
+netaddr
+neterr
+nevict
+newFormats
+newalloc
+newclient
+newdata
+newdatabase
+newfh
+newfile
+newitem
+newlist
+newmaster
+newname
+newopd
+newpage
+newpgno
+newsite
+newsitep
+newsites
+newsize
+next's
+nextIndex
+nextdup
+nextents
+nextinfo
+nextkey
+nextlsn
+nextnodup
+nextpgno
+nf
+nfid
+nfiles
+ng
+nio
+nitems
+nkeys
+nl
+nlist
+nlockers
+nlocks
+nlocktimeouts
+nlog
+nlsn
+nmodes
+nnext
+nnextlsn
+nnowaits
+noWait
+noarchive
+noautoinit
+nobjects
+nobuf
+nobuffer
+nodeMaxEntries
+nodename
+nodup
+nodupdata
+noet
+nogrant
+nogroup
+nohasham
+nolease
+nolock
+nolocking
+nolonger
+nomacro
+nomem
+nommap
+nomutex
+nonblock
+noone
+noop
+noorderchk
+nooverwrite
+nop
+nopanic
+nopenp
+noreorder
+norep
+norepmgr
+nosort
+nosync
+nosystemmem
+notdurable
+notfound
+notgranted
+notsup
+notused
+notzero
+noundo
+novrfy
+nowait
+nowaits
+np
+npages
+npeers
+npg
+npg's
+npgno
+nprocs
+nptr
+nr
+nread
+nreaders
+nrec
+nrecords
+nrecs
+nreg
+nregions
+nreleases
+nrepeat
+nrequests
+nrestores
+nsec
+nsites
+nsize
+nskeys
+nsl
+nsleep
+nsleepp
+nsnapshot
+ntasks
+nthreads
+nthrottles
+ntohl
+ntxns
+ntxntimeouts
+nuls
+num
+numberOfKeysRead
+numdup
+numdups
+numext
+numlocks
+nums
+nupgrade
+nval
+nvotes
+nw
+nwrite
+nwritep
+nwriters
+nwrites
+nwrotep
+nxt
+obj
+objc
+objectArrayToString
+objectToData
+objectToKey
+objectToValue
+objectsteals
+objp
+objs
+objv
+octets
+offdup
+offp
+offpage
+offsetp
+oflags
+ohash
+ok
+oldConfig
+oldValue
+oldValues
+olddata
+olditem
+oldname
+oldrec
+oldsize
+oldskey
+oldver
+oldversion
+oli
+omniti
+omode
+ondisk
+oneToMany
+oneToOne
+onefile
+onepeer
+onint
+onoff
+onoffp
+onpage
+op
+opcnt
+opd
+openCursors
+openDb
+openEnv
+openFlags
+openfd
+openfiles
+openhandle
+opensub
+opflags
+opgno
+opmods
+ops
+optarg
+opterr
+optind
+optopt
+optreset
+orderchkonly
+orderedkeys
+org
+orig
+originfo
+origline
+origmap
+origp
+os
+osynch
+outBuffer
+outbuf
+outdatedp
+outfd
+outfile
+outfp
+outlen
+outstr
+ovfl
+ovflok
+ovflpage
+ovflpoint
+ovflsize
+ovput
+ovref
+pSs
+pSst
+padDecrypt
+padEncrypt
+padp
+pagecnt
+pageimage
+pageinfo
+pagelayout
+pagelist
+pagelsn
+pageno
+pagep
+pagereq
+pagesize
+pagesizep
+pagesizes
+pagespace
+pagesz
+pagetype
+pagezero
+pagf
+pagfno
+panic'd
+panic'ing
+paniccall
+panicing
+panicstate
+params
+parentSsn
+parentid
+parseLong
+partialgettest
+partsize
+passwd
+passwds
+patchver
+paula
+paulo
+pbuf
+pc
+pcount
+pct
+pcursor
+pd
+pdata
+pdbp
+pdf
+penv
+perf
+perfdb
+perftool
+perms
+personValues
+pflag
+pfx
+pg
+pgaddr
+pgcookie
+pgdbp
+pgdbt
+pgerr
+pget
+pgfmt
+pgfree
+pggap
+pgheader
+pgin
+pginfo
+pgip
+pglist
+pgmax
+pgmin
+pgno
+pgnoadd
+pgnoaddr
+pgnop
+pgnos
+pgnum
+pgout
+pgp
+pgread
+pgrec
+pgs
+pgset
+pgsize
+pgwrite
+ph
+php
+physdel
+physwrite
+pid
+pids
+pinref
+pinsert
+pitem
+pk
+pkey
+pkey's
+pkeyInteger
+pkeyint
+pkeys
+pkg
+pl
+placeholder
+plist
+pm
+pmap
+pn
+poff
+portmapper
+pos
+posix
+postdestroy
+postlog
+postlogmeta
+postopen
+postsync
+pp
+ppc
+pqrst
+pr
+prR
+prdb
+prdbt
+pre
+pread
+prec
+predestroy
+preopen
+preparse
+preplist
+preprocess
+preprocessed
+preread
+prereq
+presorted
+prev
+prev's
+prevdup
+prevfile
+previousIndex
+prevlsn
+prevnodup
+prflags
+prfooter
+prheader
+pri
+priKey
+priceDb
+pridata
+primaryKey
+primaryKeyData
+primaryKeyFormat
+primaryKeyInput
+primaryKeyThang
+primget
+printf
+printlock
+printlog
+priorityp
+prnpage
+proactively
+proc
+procs
+proff
+prog
+progname
+progpath
+protos
+proxied
+proxyTypes
+prpage
+prqueue
+prtree
+pseudorandom
+psize
+psplit
+pstat
+ptail
+pthread
+pthreads
+ptr
+ptrdiff
+ptype
+pupdate
+putAll
+putall
+putchar
+putitem
+putobj
+putop
+putpageinfo
+putr
+pv
+pwrite
+qV
+qam
+qamc
+qammeta
+qmeta
+qmpf
+qnx
+qp
+qr
+qs
+qsort
+qtest
+quV
+queuestart
+quicksort
+quotedStr
+rRV
+rRs
+rV
+raison
+ramc
+rand
+randtbl
+rb
+rbtree
+rcon
+rcount
+rcrd
+rcuradj
+rcursor
+rcvd
+rdata
+rdbc
+rdbenv
+rdonly
+rdump
+reacquired
+reacquires
+readBigInteger
+readBoolean
+readByte
+readBytes
+readChar
+readChars
+readDouble
+readFloat
+readInt
+readKeyObject
+readLong
+readObject
+readShort
+readSortedDouble
+readSortedFloat
+readString
+readUnsignedByte
+readUnsignedInt
+readUnsignedShort
+readd
+readdir
+readkey
+readn
+readonly
+readratio
+readsocket
+readv
+realloc
+realloc'd
+realloc'ed
+reallocing
+realpri
+rec
+recfill
+reclen
+reclength
+recno
+recnop
+recnos
+recnum
+recnums
+recognised
+recops
+record's
+recordNumber
+recordlen
+recordp
+recoveryFeedback
+recs
+rectype
+rectypes
+recvd
+refactor
+refcnt
+refcount
+refcounting
+reffed
+reflectField
+reflectMethod
+refs
+regcomp
+regenv
+regid
+regids
+reginfo
+regionmax
+registerClass
+registerPriKeyObject
+reglocks
+regmutex
+regmutexes
+regop
+regs
+regsize
+relen
+relink
+rem
+remainingfiles
+remevent
+remfile
+remfirst
+remlock
+removeAll
+removeall
+remrem
+renum
+renv
+reorg
+rep's
+repdb
+repflag
+repl
+replication's
+replicationManagerStart
+replpair
+replyp
+repmgr
+repmgr's
+repoman
+reppg
+repquote
+repsite
+repstart
+reptest
+repth
+reput
+reputpair
+req
+rereq
+rerequest
+rerequests
+resizep
+resolvepath
+resync
+ret
+retPrimaryKey
+retValue
+retbuf
+retcopy
+retcount
+rethrown
+reties
+retp
+retsp
+retval
+reverseconcat
+reversedata
+revsplitoff
+rf
+rfd
+rfp
+rget
+rheader
+ri
+rijmen
+rijndael
+rijndaelDecrypt
+rijndaelDecryptRound
+rijndaelEncrypt
+rijndaelEncryptRound
+rijndaelKeySetupDec
+rijndaelKeySetupEnc
+ritem
+riw
+rk
+rkey
+rlen
+rlimit
+rlsn
+rlsnp
+rm
+rmdir
+rmdir's
+rmid
+rmw
+ro
+roff
+rollforward
+rootent
+rootlsn
+rop
+rp
+rp's
+rpath
+rpc
+rpcgen
+rpcid
+rpcserver
+rprint
+rptr
+rq
+rr
+rrecno
+rrecovery
+rs
+rsearch
+rskey
+rsplit
+rtree
+rtxn
+rtype
+rundb
+runlog
+rusage
+rw
+rwrw
+rwrwrw
+rwx
+sKey
+sS
+sV
+sVv
+salloc
+salvager's
+savekey
+savetime
+sc
+sccsid
+sce
+sched
+scount
+scursor
+sdb
+sdbp
+secKey
+secdata
+seckey
+secon
+secondary's
+secondaryKeyCreate
+secs
+secsp
+sectorsize
+segdata
+segid
+sel
+sema
+semid
+seminfo
+semun
+sendpages
+sendproc
+sep
+seq
+seqNo
+seqnum
+seqp
+sequentialness
+serialobj
+servlet
+setAppDispatch
+setAppendRecno
+setBtreeCompare
+setBtreePrefix
+setCacheSize
+setData
+setDuplicatelicateCompare
+setEncrypted
+setErrorHandler
+setErrorPrefix
+setFeedback
+setFeedbackHandler
+setField
+setFlags
+setHash
+setLock
+setMode
+setObj
+setObject
+setOffset
+setOp
+setPanicHandler
+setPartialLength
+setPartialOffset
+setRecno
+setRecordNumber
+setRepRequest
+setReplicationLimit
+setReplicationTransport
+setSize
+setTimeout
+setUserBufferLength
+setValue
+setflags
+setid
+setlsn
+setname
+setsid
+setstacksize
+settimeout
+setto
+settxn
+setval
+sexing
+sgenrand
+sh
+shalloc
+shalloc'ed
+shalloced
+sharedValue
+sharedb
+shareenv
+shash
+shm
+shmat
+shmctl
+shmdt
+shmem
+shmget
+shmname
+shortValue
+shortread
+shownull
+shqueue
+shr
+shreg
+sig
+sigaction
+siginit
+signalled
+signalling
+signo
+sigresend
+simpleTypes
+singleKey
+sizeAdded
+sizeNeeded
+sizefix
+sizeof
+sizeq
+sj
+sk
+skey
+skeyfour
+skeyone
+skeys
+skeyset
+skeythree
+skeytwo
+skiplist
+skiplists
+skipput
+skodonj
+sl
+sle
+sleepycat
+slh
+slumber'd
+smap
+smax
+snapshotting
+sniglet
+snprintf
+sockaddr
+socklen
+sockopt
+socktype
+sortOrder
+sortdups
+sourcep
+sp
+spanp
+sparc
+spawnl
+spgno
+spinlock
+spinlocks
+spinsp
+splitdata
+splitmeta
+splitp
+sprintf
+srand
+srandom
+src
+sread
+ss
+sscanf
+sse
+sshift
+ssize
+sslll
+ssn
+sss
+standalone
+startsWith
+startsync
+startupdone
+stat
+stati
+stats
+stbar
+std
+stddev
+stderr
+stdfd
+stdin
+stdint
+stdio's
+stdlib
+stdmode
+stdout
+stkgrow
+stkrel
+stl
+stmax
+storeConversion
+storeName
+storedCollection
+storedIter
+storedIterator
+storedList
+storedMap
+storedSet
+storedSortedMap
+storedSortedSet
+stqe
+stqh
+str
+strcasecmp
+strcat
+strchr
+strcmp
+strcpy
+strdup
+strdup'ed
+strerror
+strftime
+stringToBytes
+stringp
+strlen
+strncasecmp
+strncat
+strncmp
+strrchr
+strsep
+strtod
+strtol
+strtoul
+struct
+structs
+structure's
+sts
+stwcx
+stype
+subList
+subMap
+subSet
+subcases
+subclassed
+subdata
+subdatabase
+subdatabase's
+subdatabases
+subdb
+subdbname
+subdbpg
+subdbs
+subdistribution
+subdistributions
+submap
+subname
+subpackages
+subtransaction
+subtransactions
+sullivan
+superclasses
+suppressAccessChecks
+sv
+svc
+sw
+swigCPtr
+swpb
+symbian
+sync'd
+sync'ed
+synced
+syncs
+sysattach
+sysbuf
+syscall
+sysconf
+sysdetach
+syserr
+sz
+t's
+tNULL
+tT
+tV
+tVZ
+tVxX
+tableent
+tablesize
+tailMap
+tailSet
+tailq
+tas
+taskLock
+tbhp
+tbuf
+tc
+tchar
+tcl
+tcp
+td
+tdata
+tdkids
+tearDown
+terra
+testName
+testcopy
+testdata
+testdestdir
+testdigits
+testdocopy
+testevolvedir
+tffsp
+tfsp
+thang
+theVendor
+thies
+thr
+thread's
+threadID
+threadNumber
+threadedness
+threadid
+thrp
+tid
+tids
+tiebreaker
+tiebreaking
+timeoutp
+timespec
+timespecp
+timespecs
+timestamp
+timeval
+timout
+timouts
+tlen
+tm
+tmap
+tmax
+tmp
+tmpath
+tmpdir
+tmpmap
+tmpname
+tmutex
+tnum
+toArray
+toBuf
+toHexString
+toInclusive
+toIndex
+toKey
+toList
+toMapEntry
+toString
+toValue
+toched
+todo
+tolower
+toobig
+tp
+tpabort
+tpalloc
+tpbegin
+tpcb
+tpcommit
+tpinit
+tpsvrdone
+tpsvrinit
+tpterm
+tput
+tqe
+tqh
+tr
+transapp
+transport's
+treeorder
+tregion
+trinomials
+trunc
+truncdata
+tryable
+ts
+tsize
+tsl
+tstart
+ttmax
+ttpcbddlk
+ttpcbi
+ttpcbr
+ttype
+tv
+tvoid
+tx
+txn
+txnal
+txnapp
+txnarray
+txnid
+txnidcl
+txnids
+txnip
+txnlist
+txnnosync
+txnp
+txns
+txntimeout
+txnwait
+txt
+typemore
+uK
+uVv
+ua
+ubell
+ud
+udbt
+ufid
+ufree
+uid
+uint
+uintmax
+uintptr
+ul
+ulen
+ulens
+ulinks
+ulong
+umalloc
+umask
+un
+uname
+uncorrect
+undef
+undeleting
+undo'ing
+undodup
+undosplit
+unenhanced
+uni
+unicode
+unindexed
+uniq
+unistd
+unix
+unmap
+unmapfile
+unmark
+unmarshal
+unmarshalData
+unmarshalled
+unpinned
+unpinning
+unprintables
+unref
+unregistry
+upd
+updateDatabaseEntry
+updateDbt
+updateckp
+upg
+upgradeFeedback
+upi
+urealloc
+useCurrentKey
+usePrimaryKey
+useValue
+usec
+useconds
+usecs
+usecsp
+usercopy
+userfree
+usermem
+usr
+usrAppInit
+util
+vV
+vVxXZ
+vVxXyZ
+vZ
+va
+val
+valn
+value's
+valueBinding
+valueData
+valueEntityBinding
+valueFormat
+valueInput
+valueInputOutput
+valueOf
+valueOutput
+var
+variadic
+vars
+vbuf
+vc
+vdp
+vdp's
+vec
+vendorDB
+vendordb
+ver
+verbage
+verboseconfigtest
+verifyFeedback
+vers
+versioned
+versioning
+vflag
+vfprintf
+vica
+view's
+vrfy
+vrfyutil
+vsnprintf
+vsprintf
+vtruncate
+vvp
+vw
+vx
+vxmutex
+vxtmp
+vxtpcb
+vxworks
+wDay
+wHour
+wMinute
+wMonth
+wSecond
+wWeekDay
+wYear
+waitl
+waitlist
+waitpid
+waitsfor
+waittime
+wakeme
+wakep
+walkcnt
+walkdupint
+walkpages
+walkqueue
+wb
+wc
+wce
+wcount
+wday
+weblogic
+weblogic's
+webquill
+wildcard
+windsh
+winnt
+winuser
+wmask
+wnt
+wordlist
+workcurs
+writeAllowed
+writeBigInteger
+writeBoolean
+writeByte
+writeBytes
+writeChar
+writeChars
+writeCursor
+writeDouble
+writeFloat
+writeInt
+writeKeyObject
+writeLong
+writeObject
+writeShort
+writeSortedDouble
+writeSortedFloat
+writeString
+writeUnsignedByte
+writeUnsignedInt
+writeUnsignedShort
+writeable
+writeback
+writelock
+writelocks
+writev
+wrlock
+wrnosync
+wsa
+wsize
+wt
+wthread
+www
+xFFFF
+xa
+xact
+xalinks
+xchg
+xchgb
+xdr
+xid
+xids
+xml
+xor
+xori
+xorl
+xunlock
+xxx
+xy
+xyz
+yieldcpu
+yyy
+zend
+zero'd
+zeroeth
+zerofill
+zipCode
+zipcode
+zl
diff --git a/db-4.8.30/test/scr011/chk.tags b/db-4.8.30/test/scr011/chk.tags
new file mode 100644
index 0000000..9c95702
--- /dev/null
+++ b/db-4.8.30/test/scr011/chk.tags
@@ -0,0 +1,54 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure we don't need any more symbolic links to tags files.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+# We are dropping the tags file for 4.8, so this test is no longer valid;
+# for now, exit out.
+echo "<<< test is no longer valid >>>"
+exit 0
+
+
+t1=__1
+t2=__2
+
+(cd $d && ls -F | egrep / | sort |
+ sed -e 's/\///' \
+ -e '/^CVS$/d' \
+ -e '/^build_brew$/d' \
+ -e '/^build_s60$/d' \
+ -e '/^build_vxworks$/d' \
+ -e '/^build_wince$/d' \
+ -e '/^build_windows$/d' \
+ -e '/^docs$/d' \
+ -e '/^docs_book$/d' \
+ -e '/^docs_src$/d' \
+ -e '/^examples_java$/d' \
+ -e '/^java$/d' \
+ -e '/^mod_db4$/d' \
+ -e '/^perl$/d' \
+ -e '/^php_db4$/d' \
+ -e '/^test$/d' \
+ -e '/^test_cxx$/d' \
+ -e '/^test_micro$/d' \
+ -e '/^test_purify$/d' \
+ -e '/^test_thread$/d' \
+ -e '/^test_vxworks$/d') > $t1
+
+(cd $d && ls */tags | sed 's/\/tags$//' | sort) > $t2
+if diff $t1 $t2 > /dev/null; then
+ exit 0
+else
+ echo "<<< source tree >>> tags files"
+ diff $t1 $t2
+ exit 1
+fi
diff --git a/db-4.8.30/test/scr012/chk.vx_code b/db-4.8.30/test/scr012/chk.vx_code
new file mode 100644
index 0000000..041124f
--- /dev/null
+++ b/db-4.8.30/test/scr012/chk.vx_code
@@ -0,0 +1,85 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure the auto-generated utility code in the VxWorks build
+# directory compiles.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+F="$d/clib/getopt.c $d/common/util_arg.c $d/common/util_cache.c
+ $d/common/util_log.c $d/common/util_sig.c $d/*/*_autop.c"
+
+header()
+{
+ echo "int $1(int, char *[]);"
+ echo "int"
+ echo "main(int argc, char *argv[])"
+ echo "{return ($1(argc, argv));}"
+}
+
+LIST="\
+ db_archive \
+ db_checkpoint \
+ db_deadlock \
+ db_dump \
+ db_hotbackup \
+ db_load \
+ db_printlog \
+ db_recover \
+ db_stat \
+ db_upgrade \
+ db_verify \
+ dbdemo \
+ test_micro"
+
+# Build each program individually.
+for i in $LIST; do
+	echo "	compiling VxWorks version of $i"
+ header $i > MAIN.c
+ if cc -Wall -I.. -I$d -I$d/build_vxworks/$i \
+ $d/build_vxworks/$i/*.c MAIN.c $F ../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: unable to compile VxWorks version of $i"
+ exit 1
+ fi
+done
+
+# Build all of the programs as one big binary.
+inc=`echo $LIST | sed 's/[^ ][^ ]*/-I$d\/build_vxworks\/&/g'`
+src=`echo $LIST | sed 's/[^ ][^ ]*/$d\/build_vxworks\/&\/*.c/g'`
+
+(
+for i in $LIST; do
+ echo "int ${i}_main(int, char *[]);"
+done
+echo "int"
+echo "main(int argc, char *argv[])"
+echo "{"
+echo "int r;"
+for i in $LIST; do
+ echo "r += ${i}_main(argc, argv);"
+done
+echo "return (r);"
+echo "}"
+) > MAIN.c
+
+echo " compiling VxWorks utility composite"
+if cc -Wall -I.. -I$d $inc `eval ls $src` MAIN.c $F ../libdb.a -o t; then
+ :
+else
+ echo 'FAIL: unable to compile VxWorks utility composite'
+ exit 1
+fi
+
+exit 0
diff --git a/db-4.8.30/test/scr013/chk.stats b/db-4.8.30/test/scr013/chk.stats
new file mode 100644
index 0000000..9831c61
--- /dev/null
+++ b/db-4.8.30/test/scr013/chk.stats
@@ -0,0 +1,128 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure all of the stat structure members are included in
+# all of the possible formats.
+
+# Top-level directory.
+d=../..
+docs=$d/docs/api_reference
+
+# Path names are from a top-level directory.
+[ -f $d/README ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+t=__tmp
+
+# Extract the field names for a structure from the db.h file.
+inc_fields()
+{
+ sed -e "/struct $1 {/,/^};$/p" \
+ -e d < $d/dbinc/db.in |
+ sed -e 1d \
+ -e '$d' \
+ -e '/;/!d' \
+ -e 's/;.*//' \
+ -e 's/^[ ].*[ \*]//'
+}
+
+cat << END_OF_IGNORE > IGNORE
+bt_maxkey
+bt_metaflags
+hash_metaflags
+qs_metaflags
+qs_ndata
+st_hash_max_nowait
+END_OF_IGNORE
+
+# Check to make sure the elements of a structure from db.h appear in
+# the other files.
+inc()
+{
+ for i in `inc_fields $1`; do
+ if egrep -w $i IGNORE > /dev/null; then
+ echo " $1: ignoring $i"
+ continue
+ fi
+ for j in $2; do
+ if egrep -w $i $j > /dev/null; then
+ :;
+ else
+ echo " $1: $i not found in $j."
+ exitv=1
+ fi
+ done
+ done
+}
+
+inc "__db_bt_stat" "$d/tcl/tcl_db.c $d/btree/bt_stat.c $docs/C/dbstat.html"
+inc "__db_h_stat" "$d/tcl/tcl_db.c $d/hash/hash_stat.c $docs/C/dbstat.html"
+inc __db_lock_stat \
+ "$d/tcl/tcl_lock.c $d/lock/lock_stat.c $docs/C/lockstat.html"
+inc __db_log_stat "$d/tcl/tcl_log.c $d/log/log_stat.c $docs/C/logstat.html"
+inc __db_mpool_fstat \
+ "$d/tcl/tcl_mp.c $d/mp/mp_stat.c $docs/C/mempstat.html"
+inc __db_mpool_stat \
+ "$d/tcl/tcl_mp.c $d/mp/mp_stat.c $docs/C/mempstat.html"
+inc __db_mutex_stat \
+ "$d/mutex/mut_stat.c $docs/C/mutexstat.html"
+inc "__db_qam_stat" \
+ "$d/tcl/tcl_db.c $d/qam/qam_stat.c $docs/C/dbstat.html"
+inc __db_rep_stat \
+ "$d/tcl/tcl_rep.c $d/rep/rep_stat.c $docs/C/repstat.html"
+inc __db_seq_stat \
+ "$d/tcl/tcl_seq.c $d/sequence/seq_stat.c $docs/C/seqstat.html"
+inc __db_txn_stat \
+ "$d/tcl/tcl_txn.c $d/txn/txn_stat.c $docs/C/txnstat.html"
+
+# Check to make sure the elements from a man page appear in db.in.
+man()
+{
+ for i in `cat $t`; do
+ if egrep -w $i IGNORE > /dev/null; then
+ echo " $1: ignoring $i"
+ continue
+ fi
+ if egrep -w $i $d/dbinc/db.in > /dev/null; then
+ :;
+ else
+ echo " $1: $i not found in db.h."
+ exitv=1
+ fi
+ done
+}
+
+sed -e '/m4_field(/!d' \
+ -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/C/dbstat.html > $t
+man "checking db_stat.so against db.h"
+
+sed -e '/m4_field(/!d' \
+ -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/C/lockstat.html > $t
+man "checking lock_stat.so against db.h"
+
+sed -e '/m4_field(/!d' \
+ -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/C/logstat.html > $t
+man "checking log_stat.so against db.h"
+
+sed -e '/m4_field(/!d' \
+ -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/C/mempstat.html > $t
+man "checking memp_stat.so against db.h"
+
+sed -e '/m4_field(/!d' \
+ -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/C/repstat.html > $t
+man "checking rep_stat.so against db.h"
+
+sed -e '/m4_field(/!d' \
+ -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' < $docs/C/seqstat.html > $t
+man "checking seq_stat.so against db.h"
+
+sed -e '/m4_field(/!d' \
+ -e 's/.*m4_field[^,]*,[ ]*\([^,]*\).*/\1/' \
+ -e 's/__LB__.*//' < $docs/C/txnstat.html > $t
+man "checking txn_stat.so against db.h"
+
+exit $exitv
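
The chk.stats script above cross-checks every member of the stat structures in
db.h against the Tcl bindings, the C stat code, and the documentation, so a
field added in one place cannot silently go missing from the others. A minimal
sketch of the kind of caller that cross-check protects follows; it is not part
of the distribution, it assumes an existing btree database named access.db, and
it relies on the Db::stat() signature and the DB_BTREE_STAT field names
(bt_nkeys, bt_ndata) as they appear in the 4.8 headers.

	/* stat_sketch.cpp -- illustrative only, not shipped with the tests. */
	#include <db_cxx.h>
	#include <iostream>
	#include <stdlib.h>

	int main()
	{
		Db db(0, 0);
		db.open(NULL, "access.db", NULL, DB_BTREE, 0, 0);

		DB_BTREE_STAT *sp;
		// DB_FAST_STAT returns the counts kept on the metadata page.
		db.stat(NULL, &sp, DB_FAST_STAT);
		std::cout << "keys: " << sp->bt_nkeys
		    << ", data items: " << sp->bt_ndata << "\n";
		free(sp);		// the stat structure is allocated by the library
		db.close(0);
		return 0;
	}
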
diff --git a/db-4.8.30/test/scr014/chk.err b/db-4.8.30/test/scr014/chk.err
new file mode 100644
index 0000000..1e09b27
--- /dev/null
+++ b/db-4.8.30/test/scr014/chk.err
@@ -0,0 +1,34 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure all of the error values have corresponding error
+# message strings in db_strerror().
+
+# Top-level directory.
+d=../..
+
+# Path names are from a top-level directory.
+[ -f $d/README ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__tmp1
+t2=__tmp2
+
+egrep -- "define.*DB_.*-309" $d/dbinc/db.in | awk '{print $2}' > $t1
+sed -e '/^db_strerror/,/^}/{' \
+ -e '/ case DB_/{' \
+ -e 's/:.*//' \
+ -e 's/.* //' \
+ -e p \
+ -e '}' \
+ -e '}' \
+ -e d \
+ < $d/common/db_err.c > $t2
+
+cmp $t1 $t2 > /dev/null ||
+	{ echo "<<< db.h >>> db_strerror"; diff $t1 $t2; exit 1; }
+
+exit 0
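
chk.err above guarantees that every DB_* error value defined in db.h has a
matching message in db_strerror(), so an unmapped value can never surface as a
generic "unknown error". A tiny illustrative sketch (not shipped with the
tests) is below; DB_NOTFOUND and DB_KEYEXIST are standard error values, and
DbEnv::strerror() is the same wrapper TestConstruct01.cpp uses in its CHK macro.

	/* strerror_sketch.cpp -- illustrative only. */
	#include <db_cxx.h>
	#include <iostream>

	int main()
	{
		// Each value defined in db.h should map to a real message here.
		std::cout << DB_NOTFOUND << ": "
		    << DbEnv::strerror(DB_NOTFOUND) << "\n";
		std::cout << DB_KEYEXIST << ": "
		    << DbEnv::strerror(DB_KEYEXIST) << "\n";
		return 0;
	}
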
diff --git a/db-4.8.30/test/scr015/README b/db-4.8.30/test/scr015/README
new file mode 100644
index 0000000..8671eb9
--- /dev/null
+++ b/db-4.8.30/test/scr015/README
@@ -0,0 +1,36 @@
+# $Id$
+
+Use the scripts testall or testone to run all, or just one of the C++
+tests. You must be in this directory to run them. For example,
+
+ $ export LIBS="-L/usr/include/BerkeleyDB/lib"
+ $ export CXXFLAGS="-I/usr/include/BerkeleyDB/include"
+ $ export LD_LIBRARY_PATH="/usr/include/BerkeleyDB/lib"
+ $ ./testone TestAppendRecno
+ $ ./testall
+
+The scripts will use c++ in your path. Set the environment variable $CXX
+to override this. They will also honor any $CXXFLAGS and $LIBS
+variables that are set, except that -c is silently removed from
+$CXXFLAGS (since we do the compilation in one step).
+
+To run successfully, you will probably need to set $LD_LIBRARY_PATH
+to be the directory containing libdb_cxx-X.Y.so.
+
+As an alternative, use the --prefix=<DIR> option, a la configure,
+to set the top of the BerkeleyDB install directory. This forces
+the proper options to be added to $LIBS, $CXXFLAGS, and $LD_LIBRARY_PATH.
+For example,
+
+ $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno
+ $ ./testall --prefix=/usr/include/BerkeleyDB
+
+The test framework is pretty simple. Any <name>.cpp file in this
+directory that is not mentioned in the 'ignore' file represents a
+test. If the test does not compile successfully, the compiler output
+is left in <name>.compileout. Otherwise, the test program is run in
+a clean subdirectory using as input <name>.testin, or if that doesn't
+exist, /dev/null. Output and error from the test run are put into
+<name>.out and <name>.err. If <name>.testout and <name>.testerr exist,
+they are used as reference files and any differences are reported.
+If either of the reference files does not exist, /dev/null is used.
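
To make the naming convention in this README concrete, here is a hypothetical
skeleton of a new test. The name TestHello, the database file hello.db, and the
single line of expected output are invented for illustration; everything else
follows the pattern of the real tests in this directory. It takes no input, so
no TestHello.testin is needed, and its stdout is what TestHello.testout would
have to contain.

	/* TestHello.cpp -- hypothetical skeleton, not part of the distribution. */
	#include <db_cxx.h>
	#include <iostream>

	int main()
	{
		try {
			Db db(0, 0);		// no environment, as in TestKeyRange
			db.set_errpfx("TestHello");
			db.open(NULL, "hello.db", NULL, DB_BTREE, DB_CREATE, 0664);

			Dbt key((void *)"hello", 6);
			Dbt data((void *)"world", 6);
			db.put(0, &key, &data, 0);

			std::cout << "put ok\n";	// the one line TestHello.testout would contain
			db.close(0);
		} catch (DbException &dbe) {
			std::cerr << "TestHello: " << dbe.what() << "\n";
			return 1;
		}
		return 0;
	}
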
diff --git a/db-4.8.30/test/scr015/TestConstruct01.cpp b/db-4.8.30/test/scr015/TestConstruct01.cpp
new file mode 100644
index 0000000..354fe91
--- /dev/null
+++ b/db-4.8.30/test/scr015/TestConstruct01.cpp
@@ -0,0 +1,319 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+#define ERR(a) \
+ do { \
+ cout << "FAIL: " << (a) << "\n"; sysexit(1); \
+ } while (0)
+
+#define ERR2(a1,a2) \
+ do { \
+ cout << "FAIL: " << (a1) << ": " << (a2) << "\n"; sysexit(1); \
+ } while (0)
+
+#define ERR3(a1,a2,a3) \
+ do { \
+ cout << "FAIL: " << (a1) << ": " << (a2) << ": " << (a3) << "\n"; sysexit(1); \
+ } while (0)
+
+#define CHK(a) \
+ do { \
+ int _ret; \
+ if ((_ret = (a)) != 0) { \
+ ERR3("DB function " #a " has bad return", _ret, DbEnv::strerror(_ret)); \
+ } \
+ } while (0)
+
+#ifdef VERBOSE
+#define DEBUGOUT(a) cout << a << "\n"
+#else
+#define DEBUGOUT(a)
+#endif
+
+#define CONSTRUCT01_DBNAME "construct01.db"
+#define CONSTRUCT01_DBDIR "."
+#define CONSTRUCT01_DBFULLPATH (CONSTRUCT01_DBDIR "/" CONSTRUCT01_DBNAME)
+
+int itemcount; // count the number of items in the database
+
+// A good place to put a breakpoint...
+//
+void sysexit(int status)
+{
+ exit(status);
+}
+
+void check_file_removed(const char *name, int fatal)
+{
+ unlink(name);
+#if 0
+ if (access(name, 0) == 0) {
+ if (fatal)
+ cout << "FAIL: ";
+ cout << "File \"" << name << "\" still exists after run\n";
+ if (fatal)
+ sysexit(1);
+ }
+#endif
+}
+
+// Check that key/data for 0 - count-1 are already present,
+// and write a key/data for count. The key and data are
+// both "0123...N" where N == count-1.
+//
+// For some reason on Windows, we need to open using the full pathname
+// of the file when there is no environment, thus the 'has_env'
+// variable.
+//
+void rundb(Db *db, int count, int has_env)
+{
+ const char *name;
+
+ if (has_env)
+ name = CONSTRUCT01_DBNAME;
+ else
+ name = CONSTRUCT01_DBFULLPATH;
+
+ db->set_error_stream(&cerr);
+
+ // We don't really care about the pagesize, but we do want
+ // to make sure adjusting Db specific variables works before
+ // opening the db.
+ //
+ CHK(db->set_pagesize(1024));
+ CHK(db->open(NULL, name, NULL, DB_BTREE, count ? 0 : DB_CREATE, 0664));
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ char outbuf[10];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = '0' + i;
+ }
+ outbuf[i++] = '\0';
+ Dbt key(outbuf, i);
+ Dbt data(outbuf, i);
+
+ DEBUGOUT("Put: " << outbuf);
+ CHK(db->put(0, &key, &data, DB_NOOVERWRITE));
+
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ CHK(db->cursor(NULL, &dbcp, 0));
+
+ // Walk through the table, checking
+ Dbt readkey;
+ Dbt readdata;
+ while (dbcp->get(&readkey, &readdata, DB_NEXT) == 0) {
+ char *key_string = (char *)readkey.get_data();
+ char *data_string = (char *)readdata.get_data();
+ DEBUGOUT("Got: " << key_string << ": " << data_string);
+ int len = strlen(key_string);
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad");
+ }
+ else if (strcmp(data_string, key_string) != 0) {
+ ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string[i] != ('0' + i)) {
+ cout << " got " << key_string
+ << " (" << (int)key_string[i] << ")"
+ << ", wanted " << i
+ << " (" << (int)('0' + i) << ")"
+ << " at position " << i << "\n";
+ ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ cout << " expected more keys, bitmap is: " << expected << "\n";
+ ERR("missing keys in database");
+ }
+ CHK(dbcp->close());
+ CHK(db->close(0));
+}
+
+void t1(int except_flag)
+{
+ cout << " Running test 1:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t2(int except_flag)
+{
+ cout << " Running test 2:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t3(int except_flag)
+{
+ cout << " Running test 3:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t4(int except_flag)
+{
+ cout << " Running test 4:\n";
+ DbEnv env(except_flag);
+ CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
+ Db db(&env, 0);
+ CHK(db.close(0));
+ CHK(env.close(0));
+ cout << " finished.\n";
+}
+
+void t5(int except_flag)
+{
+ cout << " Running test 5:\n";
+ DbEnv env(except_flag);
+ CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
+ Db db(&env, 0);
+ rundb(&db, itemcount++, 1);
+ // Note we cannot reuse the old Db!
+ Db anotherdb(&env, 0);
+
+ anotherdb.set_errpfx("test5");
+ rundb(&anotherdb, itemcount++, 1);
+ CHK(env.close(0));
+ cout << " finished.\n";
+}
+
+void t6(int except_flag)
+{
+ cout << " Running test 6:\n";
+
+ /* From user [#2939] */
+ int err;
+
+ DbEnv* penv = new DbEnv(DB_CXX_NO_EXCEPTIONS);
+ penv->set_cachesize(0, 32 * 1024, 0);
+ penv->open(CONSTRUCT01_DBDIR, DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL, 0);
+
+ //LEAK: remove this block and leak disappears
+ Db* pdb = new Db(penv,0);
+ if ((err = pdb->close(0)) != 0) {
+ fprintf(stderr, "Error closing Db: %s\n", db_strerror(err));
+ }
+ delete pdb;
+ //LEAK: remove this block and leak disappears
+
+ if ((err = penv->close(0)) != 0) {
+ fprintf(stderr, "Error closing DbEnv: %s\n", db_strerror(err));
+ }
+ delete penv;
+
+ cout << " finished.\n";
+}
+
+// remove any existing environment or database
+void removeall()
+{
+ {
+ DbEnv tmpenv(DB_CXX_NO_EXCEPTIONS);
+ (void)tmpenv.remove(CONSTRUCT01_DBDIR, DB_FORCE);
+ }
+
+ check_file_removed(CONSTRUCT01_DBFULLPATH, 1);
+ for (int i=0; i<8; i++) {
+ char buf[20];
+ sprintf(buf, "__db.00%d", i);
+ check_file_removed(buf, 1);
+ }
+}
+
+int doall(int except_flag)
+{
+ itemcount = 0;
+ try {
+ // before and after the run, removing any
+		// Remove any old environment/database both before
+		// and after the run.
+ removeall();
+ t1(except_flag);
+ t2(except_flag);
+ t3(except_flag);
+ t4(except_flag);
+ t5(except_flag);
+ t6(except_flag);
+
+ removeall();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ ERR2("EXCEPTION RECEIVED", dbe.what());
+ }
+ return 1;
+}
+
+int main(int argc, char *argv[])
+{
+ int iterations = 1;
+ if (argc > 1) {
+ iterations = atoi(argv[1]);
+ if (iterations < 0) {
+ ERR("Usage: construct01 count");
+ }
+ }
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ cout << "(" << i << "/" << iterations << ") ";
+ }
+ cout << "construct01 running:\n";
+ if (doall(DB_CXX_NO_EXCEPTIONS) != 0) {
+ ERR("SOME TEST FAILED FOR NO-EXCEPTION TEST");
+ }
+ else if (doall(0) != 0) {
+ ERR("SOME TEST FAILED FOR EXCEPTION TEST");
+ }
+ else {
+ cout << "\nALL TESTS SUCCESSFUL\n";
+ }
+ }
+ return 0;
+}
diff --git a/db-4.8.30/test/scr015/TestConstruct01.testerr b/db-4.8.30/test/scr015/TestConstruct01.testerr
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/db-4.8.30/test/scr015/TestConstruct01.testerr
diff --git a/db-4.8.30/test/scr015/TestConstruct01.testout b/db-4.8.30/test/scr015/TestConstruct01.testout
new file mode 100644
index 0000000..9b840f9
--- /dev/null
+++ b/db-4.8.30/test/scr015/TestConstruct01.testout
@@ -0,0 +1,27 @@
+(0/1) construct01 running:
+ Running test 1:
+ finished.
+ Running test 2:
+ finished.
+ Running test 3:
+ finished.
+ Running test 4:
+ finished.
+ Running test 5:
+ finished.
+ Running test 6:
+ finished.
+ Running test 1:
+ finished.
+ Running test 2:
+ finished.
+ Running test 3:
+ finished.
+ Running test 4:
+ finished.
+ Running test 5:
+ finished.
+ Running test 6:
+ finished.
+
+ALL TESTS SUCCESSFUL
diff --git a/db-4.8.30/test/scr015/TestGetSetMethods.cpp b/db-4.8.30/test/scr015/TestGetSetMethods.cpp
new file mode 100644
index 0000000..383df3c
--- /dev/null
+++ b/db-4.8.30/test/scr015/TestGetSetMethods.cpp
@@ -0,0 +1,86 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Do some regression tests for simple get/set access methods
+ * on DbEnv, DbTxn, Db. We don't currently test that they have
+ * the desired effect, only that they operate and return correctly.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ DbEnv *dbenv = new DbEnv(0);
+ DbTxn *dbtxn;
+ u_int8_t conflicts[10];
+
+ dbenv->set_error_stream(&cerr);
+ dbenv->set_timeout(0x90000000,
+ DB_SET_LOCK_TIMEOUT);
+ dbenv->set_lg_bsize(0x1000);
+ dbenv->set_lg_dir(".");
+ dbenv->set_lg_max(0x10000000);
+ dbenv->set_lg_regionmax(0x100000);
+ dbenv->set_lk_conflicts(conflicts, sizeof(conflicts));
+ dbenv->set_lk_detect(DB_LOCK_DEFAULT);
+ dbenv->set_lk_max_lockers(100);
+ dbenv->set_lk_max_locks(10);
+ dbenv->set_lk_max_objects(1000);
+ dbenv->set_mp_mmapsize(0x10000);
+
+ // Need to open the environment so we
+ // can get a transaction.
+ //
+ dbenv->open(".", DB_CREATE | DB_INIT_TXN |
+ DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL,
+ 0644);
+
+ dbenv->txn_begin(NULL, &dbtxn, DB_TXN_NOWAIT);
+ dbtxn->set_timeout(0xA0000000, DB_SET_TXN_TIMEOUT);
+ dbtxn->abort();
+
+ dbenv->close(0);
+
+ // We get a db, one for each type.
+ // That's because once we call (for instance)
+ // set_bt_minkey, DB 'knows' that this is a
+ // Btree Db, and it cannot be used to try Hash
+ // or Recno functions.
+ //
+ Db *db_bt = new Db(NULL, 0);
+ db_bt->set_bt_minkey(100);
+ db_bt->set_cachesize(0, 0x100000, 0);
+ db_bt->close(0);
+
+ Db *db_h = new Db(NULL, 0);
+ db_h->set_h_ffactor(0x10);
+ db_h->set_h_nelem(100);
+ db_h->set_lorder(0);
+ db_h->set_pagesize(0x10000);
+ db_h->close(0);
+
+ Db *db_re = new Db(NULL, 0);
+ db_re->set_re_delim('@');
+ db_re->set_re_pad(10);
+ db_re->set_re_source("re.in");
+ db_re->close(0);
+
+ Db *db_q = new Db(NULL, 0);
+ db_q->set_q_extentsize(200);
+ db_q->close(0);
+
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what() << "\n";
+ }
+ return 0;
+}
diff --git a/db-4.8.30/test/scr015/TestKeyRange.cpp b/db-4.8.30/test/scr015/TestKeyRange.cpp
new file mode 100644
index 0000000..670f027
--- /dev/null
+++ b/db-4.8.30/test/scr015/TestKeyRange.cpp
@@ -0,0 +1,168 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * NOTE: AccessExample changed to test Db.key_range. We made a global
+ * change of /AccessExample/TestKeyRange/; the only other changes are
+ * marked with comments notated as 'ADDED'.
+ */
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+class TestKeyRange
+{
+public:
+ TestKeyRange();
+ void run();
+
+private:
+ static const char FileName[];
+
+ // no need for copy and assignment
+ TestKeyRange(const TestKeyRange &);
+ void operator = (const TestKeyRange &);
+};
+
+static void usage(); // forward
+
+int main(int argc, char *argv[])
+{
+ if (argc > 1) {
+ usage();
+ }
+
+ // Use a try block just to report any errors.
+ // An alternate approach to using exceptions is to
+ // use error models (see DbEnv::set_error_model()) so
+ // that error codes are returned for all Berkeley DB methods.
+ //
+ try {
+ TestKeyRange app;
+ app.run();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ cerr << "TestKeyRange: " << dbe.what() << "\n";
+ return 1;
+ }
+}
+
+static void usage()
+{
+ cerr << "usage: TestKeyRange\n";
+ exit(1);
+}
+
+const char TestKeyRange::FileName[] = "access.db";
+
+TestKeyRange::TestKeyRange()
+{
+}
+
+void TestKeyRange::run()
+{
+ // Remove the previous database.
+ (void)unlink(FileName);
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db db(0, 0);
+
+ db.set_error_stream(&cerr);
+ db.set_errpfx("TestKeyRange");
+ db.set_pagesize(1024); /* Page size: 1K. */
+ db.set_cachesize(0, 32 * 1024, 0);
+ db.open(NULL, FileName, NULL, DB_BTREE, DB_CREATE, 0664);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ char buf[1024];
+ char rbuf[1024];
+ char *t;
+ char *p;
+ int ret;
+ int len;
+ Dbt *firstkey = NULL;
+ char firstbuf[1024];
+
+ for (;;) {
+ cout << "input>";
+ cout.flush();
+
+ cin.getline(buf, sizeof(buf));
+ if (cin.eof())
+ break;
+
+ if ((len = strlen(buf)) <= 0)
+ continue;
+ for (t = rbuf, p = buf + (len - 1); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ Dbt key(buf, len + 1);
+ Dbt data(rbuf, len + 1);
+ if (firstkey == NULL) {
+ strcpy(firstbuf, buf);
+ firstkey = new Dbt(firstbuf, len + 1);
+ }
+
+ ret = db.put(0, &key, &data, DB_NOOVERWRITE);
+ if (ret == DB_KEYEXIST) {
+ cout << "Key " << buf << " already exists.\n";
+ }
+ cout << "\n";
+ }
+
+ // We put a try block around this section of code
+ // to ensure that our database is properly closed
+ // in the event of an error.
+ //
+ try {
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ db.cursor(NULL, &dbcp, 0);
+
+ /*ADDED...*/
+ DB_KEY_RANGE range;
+ memset(&range, 0, sizeof(range));
+
+ db.key_range(NULL, firstkey, &range, 0);
+ printf("less: %f\n", range.less);
+ printf("equal: %f\n", range.equal);
+ printf("greater: %f\n", range.greater);
+ /*end ADDED*/
+
+ Dbt key;
+ Dbt data;
+
+ // Walk through the table, printing the key/data pairs.
+ while (dbcp->get(&key, &data, DB_NEXT) == 0) {
+ char *key_string = (char *)key.get_data();
+ char *data_string = (char *)data.get_data();
+ cout << key_string << " : " << data_string << "\n";
+ }
+ dbcp->close();
+ }
+ catch (DbException &dbe) {
+ cerr << "TestKeyRange: " << dbe.what() << "\n";
+ }
+
+ db.close(0);
+}
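
The 'ADDED' block prints the three DB_KEY_RANGE estimates separately. A minimal sanity-check sketch, assuming an open, populated Db handle and a probe Dbt as in the test above: less, equal and greater are each fractions of the keys in the tree, so together they should cover the whole key space (the .testout values below sum to exactly 1.0).

#include <db_cxx.h>
#include <string.h>
#include <iostream>

// Probe one key and verify the three estimates sum to roughly 1.0.
void check_key_range(Db &db, Dbt &probe)
{
	DB_KEY_RANGE range;
	memset(&range, 0, sizeof(range));

	db.key_range(NULL, &probe, &range, 0);

	double total = range.less + range.equal + range.greater;
	if (total < 0.99 || total > 1.01)
		std::cerr << "unexpected key_range total: " << total << "\n";
}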
diff --git a/db-4.8.30/test/scr015/TestKeyRange.testin b/db-4.8.30/test/scr015/TestKeyRange.testin
new file mode 100644
index 0000000..a2b6bd7
--- /dev/null
+++ b/db-4.8.30/test/scr015/TestKeyRange.testin
@@ -0,0 +1,8 @@
+first line is alphabetically somewhere in the middle.
+Blah blah
+let's have exactly eight lines of input.
+stuff
+more stuff
+and even more stuff
+lastly
+but not leastly.
diff --git a/db-4.8.30/test/scr015/TestKeyRange.testout b/db-4.8.30/test/scr015/TestKeyRange.testout
new file mode 100644
index 0000000..25b2e1a
--- /dev/null
+++ b/db-4.8.30/test/scr015/TestKeyRange.testout
@@ -0,0 +1,19 @@
+input>
+input>
+input>
+input>
+input>
+input>
+input>
+input>
+input>less: 0.375000
+equal: 0.125000
+greater: 0.500000
+Blah blah : halb halB
+and even more stuff : ffuts erom neve dna
+but not leastly. : .yltsael ton tub
+first line is alphabetically somewhere in the middle. : .elddim eht ni erehwemos yllacitebahpla si enil tsrif
+lastly : yltsal
+let's have exactly eight lines of input. : .tupni fo senil thgie yltcaxe evah s'tel
+more stuff : ffuts erom
+stuff : ffuts
diff --git a/db-4.8.30/test/scr015/TestLogc.cpp b/db-4.8.30/test/scr015/TestLogc.cpp
new file mode 100644
index 0000000..c22fdfc
--- /dev/null
+++ b/db-4.8.30/test/scr015/TestLogc.cpp
@@ -0,0 +1,106 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * A basic regression test for the Logc class.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+static void show_dbt(ostream &os, Dbt *dbt)
+{
+ int i;
+ int size = dbt->get_size();
+ unsigned char *data = (unsigned char *)dbt->get_data();
+
+ os << "size: " << size << " data: ";
+ for (i=0; i<size && i<10; i++) {
+ os << (int)data[i] << " ";
+ }
+ if (i<size)
+ os << "...";
+}
+
+int main(int argc, char *argv[])
+{
+ try {
+ DbEnv *env = new DbEnv(0);
+ DbTxn *dbtxn;
+ env->open(".", DB_CREATE | DB_INIT_LOG |
+ DB_INIT_TXN | DB_INIT_MPOOL, 0);
+
+ // Do some database activity to get something into the log.
+ Db *db1 = new Db(env, 0);
+ env->txn_begin(NULL, &dbtxn, DB_TXN_NOWAIT);
+ db1->open(dbtxn, "first.db", NULL, DB_BTREE, DB_CREATE, 0);
+ Dbt *key = new Dbt((char *)"a", 1);
+ Dbt *data = new Dbt((char *)"b", 1);
+ db1->put(dbtxn, key, data, 0);
+ key->set_data((char *)"c");
+ data->set_data((char *)"d");
+ db1->put(dbtxn, key, data, 0);
+ dbtxn->commit(0);
+
+ env->txn_begin(NULL, &dbtxn, DB_TXN_NOWAIT);
+ key->set_data((char *)"e");
+ data->set_data((char *)"f");
+ db1->put(dbtxn, key, data, 0);
+ key->set_data((char *)"g");
+ data->set_data((char *)"h");
+ db1->put(dbtxn, key, data, 0);
+ dbtxn->commit(0);
+ db1->close(0);
+
+ // flush the log
+ env->log_flush(NULL);
+
+ // Now get a log cursor and walk through.
+ DbLogc *logc;
+
+ env->log_cursor(&logc, 0);
+ int ret = 0;
+ DbLsn lsn;
+ Dbt *dbt = new Dbt();
+ u_int32_t flags = DB_FIRST;
+
+ int count = 0;
+ while ((ret = logc->get(&lsn, dbt, flags)) == 0) {
+
+ // We ignore the contents of the log record,
+ // it's not portable. Even the exact count
+ // is may change when the underlying implementation
+ // changes, we'll just make sure at the end we saw
+ // 'enough'.
+ //
+ // cout << "logc.get: " << count;
+ // show_dbt(cout, dbt);
+ // cout << "\n";
+ //
+ count++;
+ flags = DB_NEXT;
+ }
+ if (ret != DB_NOTFOUND) {
+ cerr << "*** FAIL: logc.get returned: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ logc->close(0);
+
+ // There have to be at *least* 12 log records,
+ // 2 for each put, plus some for the commits.
+ //
+ if (count < 12)
+ cerr << "*** FAIL: not enough log records\n";
+
+ cout << "TestLogc done.\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "*** FAIL: " << dbe.what() <<"\n";
+ }
+ return 0;
+}
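
TestLogc walks the log forward with DB_FIRST/DB_NEXT. A minimal sketch of the complementary check, assuming the same open environment: walking backward from DB_LAST via DB_PREV should visit the same number of records.

#include <db_cxx.h>
#include <iostream>

// Count log records from the end of the log back to the beginning.
int count_log_backwards(DbEnv *env)
{
	DbLogc *logc;
	DbLsn lsn;
	Dbt rec;
	u_int32_t flags = DB_LAST;
	int ret, count = 0;

	env->log_cursor(&logc, 0);
	while ((ret = logc->get(&lsn, &rec, flags)) == 0) {
		count++;
		flags = DB_PREV;
	}
	if (ret != DB_NOTFOUND)
		std::cerr << "log cursor error: "
		    << DbEnv::strerror(ret) << "\n";
	logc->close(0);
	return count;
}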
diff --git a/db-4.8.30/test/scr015/TestLogc.testout b/db-4.8.30/test/scr015/TestLogc.testout
new file mode 100644
index 0000000..afac3af
--- /dev/null
+++ b/db-4.8.30/test/scr015/TestLogc.testout
@@ -0,0 +1 @@
+TestLogc done.
diff --git a/db-4.8.30/test/scr015/TestSimpleAccess.cpp b/db-4.8.30/test/scr015/TestSimpleAccess.cpp
new file mode 100644
index 0000000..625c705
--- /dev/null
+++ b/db-4.8.30/test/scr015/TestSimpleAccess.cpp
@@ -0,0 +1,66 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Do a simple regression test of basic put/get access:
+ * store one key/data pair, retrieve it, and verify that a
+ * lookup with a key that was never stored returns DB_NOTFOUND.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ Db *db = new Db(NULL, 0);
+ db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644);
+
+ // populate our massive database.
+ // all our strings include null for convenience.
+ // Note we have to cast for idiomatic
+ // usage, since newer gcc requires it.
+ Dbt *keydbt = new Dbt((char *)"key", 4);
+ Dbt *datadbt = new Dbt((char *)"data", 5);
+ db->put(NULL, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt *goodkeydbt = new Dbt((char *)"key", 4);
+ Dbt *badkeydbt = new Dbt((char *)"badkey", 7);
+ Dbt *resultdbt = new Dbt();
+ resultdbt->set_flags(DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ cout << "get: " << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "get using bad key: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "*** got data using bad key!!: "
+ << result << "\n";
+ }
+ cout << "finished test\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
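
The result Dbt above is flagged DB_DBT_MALLOC, so Berkeley DB allocates each returned data buffer with malloc() and the caller owns it; the test never frees those buffers, which is harmless here but worth keeping in mind when copying the pattern. A minimal sketch of the ownership rule, assuming an open Db handle:

#include <db_cxx.h>
#include <stdlib.h>
#include <iostream>

// With DB_DBT_MALLOC the library malloc()s the returned data,
// so the caller is responsible for freeing it.
void get_and_free(Db *db, Dbt *key)
{
	Dbt result;
	result.set_flags(DB_DBT_MALLOC);

	if (db->get(NULL, key, &result, 0) == 0) {
		std::cout << (char *)result.get_data() << "\n";
		free(result.get_data());
	}
}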
diff --git a/db-4.8.30/test/scr015/TestSimpleAccess.testout b/db-4.8.30/test/scr015/TestSimpleAccess.testout
new file mode 100644
index 0000000..dc88d47
--- /dev/null
+++ b/db-4.8.30/test/scr015/TestSimpleAccess.testout
@@ -0,0 +1,3 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/db-4.8.30/test/scr015/TestTruncate.cpp b/db-4.8.30/test/scr015/TestTruncate.cpp
new file mode 100644
index 0000000..c212120
--- /dev/null
+++ b/db-4.8.30/test/scr015/TestTruncate.cpp
@@ -0,0 +1,83 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+/*
+ * Do a simple regression test of Db::truncate: store and retrieve
+ * a key/data pair, truncate the database, check the returned record
+ * count, and verify that the pair can no longer be retrieved.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ Db *db = new Db(NULL, 0);
+ db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644);
+
+ // populate our massive database.
+ // all our strings include null for convenience.
+ // Note we have to cast for idiomatic
+ // usage, since newer gcc requires it.
+ Dbt *keydbt = new Dbt((char*)"key", 4);
+ Dbt *datadbt = new Dbt((char*)"data", 5);
+ db->put(NULL, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt *goodkeydbt = new Dbt((char*)"key", 4);
+ Dbt *badkeydbt = new Dbt((char*)"badkey", 7);
+ Dbt *resultdbt = new Dbt();
+ resultdbt->set_flags(DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ cout << "get: " << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "get using bad key: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "*** got data using bad key!!: "
+ << result << "\n";
+ }
+
+ // Now, truncate and make sure that it's really gone.
+ cout << "truncating data...\n";
+ u_int32_t nrecords;
+ db->truncate(NULL, &nrecords, 0);
+ cout << "truncate returns " << nrecords << "\n";
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "after truncate get: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ db->close(0);
+ cout << "finished test\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
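
A stronger check could follow the truncate while the Db handle is still open, before db->close(0): a cursor walk should now find no records at all, not just a missing key. A minimal sketch (count_records is a hypothetical helper, not part of the test):

#include <db_cxx.h>

// Count the records remaining in the database; expected to be 0
// immediately after db->truncate().
int count_records(Db *db)
{
	Dbc *cursor;
	Dbt key, data;
	int count = 0;

	db->cursor(NULL, &cursor, 0);
	while (cursor->get(&key, &data, DB_NEXT) == 0)
		count++;
	cursor->close();
	return count;
}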
diff --git a/db-4.8.30/test/scr015/TestTruncate.testout b/db-4.8.30/test/scr015/TestTruncate.testout
new file mode 100644
index 0000000..0a4bc98
--- /dev/null
+++ b/db-4.8.30/test/scr015/TestTruncate.testout
@@ -0,0 +1,6 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+truncating data...
+truncate returns 1
+after truncate get: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/db-4.8.30/test/scr015/chk.cxxtests b/db-4.8.30/test/scr015/chk.cxxtests
new file mode 100644
index 0000000..97733c3
--- /dev/null
+++ b/db-4.8.30/test/scr015/chk.cxxtests
@@ -0,0 +1,73 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure that regression tests for C++ run.
+
+TEST_CXX_SRCDIR=../test/scr015 # must be a relative directory
+
+# All paths must be relative to a subdirectory of the build directory
+LIBS="-L.. -ldb_cxx"
+CXXFLAGS="-I.. -I../../dbinc"
+
+[ `uname` = "Linux" ] && LIBS="$LIBS -lpthread"
+
+# Test must be run from a local build directory, not from a test
+# directory.
+cd ..
+[ -f db_config.h ] || {
+ echo 'FAIL: chk.cxxtests must be run from a local build directory.'
+ exit 1
+}
+[ -d ../env ] || {
+ echo 'FAIL: chk.cxxtests must be run from a local build directory.'
+ exit 1
+}
+[ -f libdb.a ] || make libdb.a || {
+ echo 'FAIL: unable to build libdb.a'
+ exit 1
+}
+[ -f libdb_cxx.a ] || make libdb_cxx.a || {
+ echo 'FAIL: unable to build libdb_cxx.a'
+ exit 1
+}
+CXX=`sed -e '/^CXX=/!d' -e 's/^CXX=//' -e 's/.*mode=compile *//' Makefile`
+echo " ====== cxx tests using $CXX"
+testnames=`cd $TEST_CXX_SRCDIR; ls *.cpp | sed -e 's/\.cpp$//'`
+
+for testname in $testnames; do
+ if grep -x $testname $TEST_CXX_SRCDIR/ignore > /dev/null; then
+ echo " **** cxx test $testname ignored"
+ continue
+ fi
+
+ echo " ==== cxx test $testname"
+ rm -rf TESTCXX; mkdir TESTCXX
+ cd ./TESTCXX
+ testprefix=../$TEST_CXX_SRCDIR/$testname
+
+ ${CXX} ${CXXFLAGS} -o $testname $testprefix.cpp ${LIBS} > ../$testname.compileout 2>&1 || {
+ echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
+ exit 1
+ }
+ rm -f ../$testname.compileout
+ infile=$testprefix.testin
+ [ -f $infile ] || infile=/dev/null
+ goodoutfile=$testprefix.testout
+ [ -f $goodoutfile ] || goodoutfile=/dev/null
+ gooderrfile=$testprefix.testerr
+ [ -f $gooderrfile ] || gooderrfile=/dev/null
+ ./$testname <$infile >../$testname.out 2>../$testname.err
+ cmp ../$testname.out $goodoutfile > /dev/null || {
+ echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
+ exit 1
+ }
+ cmp ../$testname.err $gooderrfile > /dev/null || {
+ echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
+ exit 1
+ }
+ cd ..
+ rm -f $testname.err $testname.out
+done
+rm -rf TESTCXX
+exit 0
diff --git a/db-4.8.30/test/scr015/ignore b/db-4.8.30/test/scr015/ignore
new file mode 100644
index 0000000..bcd98b5
--- /dev/null
+++ b/db-4.8.30/test/scr015/ignore
@@ -0,0 +1,4 @@
+#
+# $Id$
+#
+# A list of tests to ignore
diff --git a/db-4.8.30/test/scr015/testall b/db-4.8.30/test/scr015/testall
new file mode 100644
index 0000000..5d1ceba
--- /dev/null
+++ b/db-4.8.30/test/scr015/testall
@@ -0,0 +1,32 @@
+#!/bin/sh -
+# $Id$
+#
+# Run all the C++ regression tests
+
+ecode=0
+prefixarg=""
+stdinarg=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefixarg="$1"; shift;;
+ --stdin )
+ stdinarg="$1"; shift;;
+ * )
+ break
+ esac
+done
+files="`find . -name \*.cpp -print`"
+for file in $files; do
+ name=`echo $file | sed -e 's:^\./::' -e 's/\.cpp$//'`
+ if grep $name ignore > /dev/null; then
+ echo " **** cxx test $name ignored"
+ else
+ echo " ==== cxx test $name"
+ if ! sh ./testone $prefixarg $stdinarg $name; then
+ ecode=1
+ fi
+ fi
+done
+exit $ecode
diff --git a/db-4.8.30/test/scr015/testone b/db-4.8.30/test/scr015/testone
new file mode 100644
index 0000000..c1fa93b
--- /dev/null
+++ b/db-4.8.30/test/scr015/testone
@@ -0,0 +1,122 @@
+#!/bin/sh -
+# $Id$
+#
+# Run just one C++ regression test. The single argument
+# is the basename of the test, e.g. TestRpcServer
+
+error()
+{
+ echo '' >&2
+ echo "C++ regression error: $@" >&2
+ echo '' >&2
+ ecode=1
+}
+
+# compares the result against the good version,
+# reports differences, and removes the result file
+# if there are no differences.
+#
+compare_result()
+{
+ good="$1"
+ latest="$2"
+ if [ ! -e "$good" ]; then
+ echo "Note: $good does not exist"
+ return
+ fi
+ tmpout=/tmp/blddb$$.tmp
+ diff "$good" "$latest" > $tmpout
+ if [ -s $tmpout ]; then
+ nbad=`grep '^[0-9]' $tmpout | wc -l`
+ error "$good and $latest differ in $nbad places."
+ else
+ rm $latest
+ fi
+ rm -f $tmpout
+}
+
+ecode=0
+stdinflag=n
+gdbflag=n
+CXX=${CXX:-c++}
+LIBS=${LIBS:-}
+
+# remove any -c option in the CXXFLAGS
+CXXFLAGS="`echo " ${CXXFLAGS} " | sed -e 's/ -c //g'`"
+
+# determine the prefix of the install tree
+prefix=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
+ LIBS="-L$prefix/lib -ldb_cxx $LIBS"
+ CXXFLAGS="-I$prefix/include $CXXFLAGS"
+ export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
+ ;;
+ --stdin )
+ stdinflag=y; shift
+ ;;
+ --gdb )
+ CXXFLAGS="-g $CXXFLAGS"
+ gdbflag=y; shift
+ ;;
+ * )
+ break
+ ;;
+ esac
+done
+
+if [ "$#" = 0 ]; then
+ echo 'Usage: testone [ --prefix=<dir> | --stdin ] TestName'
+ exit 1
+fi
+name="$1"
+
+# compile
+rm -rf TESTDIR; mkdir TESTDIR
+cd ./TESTDIR
+
+${CXX} ${CXXFLAGS} -o $name ../$name.cpp ${LIBS} > ../$name.compileout 2>&1
+if [ $? != 0 -o -s ../$name.compileout ]; then
+ error "compilation of $name failed, see $name.compileout"
+ exit 1
+fi
+rm -f ../$name.compileout
+
+# find input and error file
+infile=../$name.testin
+if [ ! -f $infile ]; then
+ infile=/dev/null
+fi
+
+# run and diff results
+rm -rf TESTDIR
+if [ "$gdbflag" = y ]; then
+ if [ -s $infile ]; then
+ echo "Input file is $infile"
+ fi
+ gdb ./$name
+ exit 0
+elif [ "$stdinflag" = y ]; then
+ ./$name >../$name.out 2>../$name.err
+else
+ ./$name <$infile >../$name.out 2>../$name.err
+fi
+cd ..
+
+testerr=$name.testerr
+if [ ! -f $testerr ]; then
+ testerr=/dev/null
+fi
+
+testout=$name.testout
+if [ ! -f $testout ]; then
+ testout=/dev/null
+fi
+
+compare_result $testout $name.out
+compare_result $testerr $name.err
+rm -rf TESTDIR
+exit $ecode
diff --git a/db-4.8.30/test/scr016/Makefile b/db-4.8.30/test/scr016/Makefile
new file mode 100644
index 0000000..555464a
--- /dev/null
+++ b/db-4.8.30/test/scr016/Makefile
@@ -0,0 +1,18 @@
+TESTCLASSES=\
+ ./src/com/sleepycat/db/test/*.java
+
+all: dbtest.jar
+
+dbtest.jar: classesdir
+ # Compile the tests and build the test jar
+ javac -classpath "${DB_JAR}${CP_SEP}${REQUIRED_JARS}" \
+ -d ./classes ${TESTCLASSES}
+ jar cf ./dbtest.jar -C ./classes ./com/sleepycat
+
+classesdir:
+ [ -d ./classes ] || (mkdir ./classes)
+
+clean:
+ [ -d ./classes ] && rm -rf ./classes
+ [ -f ./dbtest.jar ] && rm ./dbtest.jar
+
diff --git a/db-4.8.30/test/scr016/README b/db-4.8.30/test/scr016/README
new file mode 100644
index 0000000..841d726
--- /dev/null
+++ b/db-4.8.30/test/scr016/README
@@ -0,0 +1,14 @@
+
+This test suite is designed to test the basic functionality of the core DB Java API.
+
+To build and run the test suite you need to have JUnit 4 or higher installed, with its location set in your environment as JUNIT_JAR.
+
+The script chk.bdb builds and runs all of the test cases currently enabled.
+
+Each new Java source file you create needs to be added to the Makefile.
+
+The script makenewtest.sh in this directory helps with that. Run the script with a single parameter, the name of the new test to create; it will populate a Java source file in src/com/sleepycat/db/test with the basic layout all tests should have.
+
+The two files config_nix and config_win are used by the test cases to load profile information. For now they only contain a path to the root directory where test output files should be written.
+The configuration gets loaded via the TestUtils class.
+
diff --git a/db-4.8.30/test/scr016/chk.bdb b/db-4.8.30/test/scr016/chk.bdb
new file mode 100644
index 0000000..88ef64d
--- /dev/null
+++ b/db-4.8.30/test/scr016/chk.bdb
@@ -0,0 +1,75 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Run the DB Java API test suite.
+# Alternatively supply the class name for an individual test
+# case and have that run.
+
+# NOTES:
+# This test requires one JAR not included with the Berkeley DB
+# distribution: JUnit (junit.jar). I've been using the 8/31/2002 version
+# of JUnit. You can download this JAR from http://jakarta.apache.org/
+#
+# JUNIT_JAR=/Users/gburd/Unix/opt/junit/junit.jar
+
+[ "x$JUNIT_JAR" = "x" ] && {
+ echo 'FAIL: the environment variable JUNIT_JAR (path to junit.jar) is not set.'
+ exit 1
+}
+
+[ -f $JUNIT_JAR ] || {
+ echo 'FAIL: JUNIT_JAR not a valid path to the junit.jar.'
+ exit 1
+}
+
+case `uname` in
+*CYGWIN*)
+ CP_SEP=";"
+ d="../../build_windows/Win32/Debug"
+ DB_LIB_DIR="$d"
+ PATH="../../build_windows/Win32/Debug:$PATH"
+ export PATH;;
+*)
+ CP_SEP=":"
+ d="../../build_unix/"
+ DB_LIB_DIR="$d/.libs"
+esac
+
+REQUIRED_JARS=$JUNIT_JAR
+DB_JAR=$d/db.jar
+export DB_JAR
+export REQUIRED_JARS
+export CP_SEP
+export DB_LIB_DIR
+
+# Build the tests.
+
+make clean
+
+[ -f ./dbtest.jar ] || (make dbtest.jar) || {
+ echo 'FAIL: unable to find or build dbtest.jar'
+ exit 1
+}
+
+# Run the tests.
+
+if [ -n "$1" ]
+then
+ echo "Running: $1"
+ java -Xcheck:jni -Djava.library.path=$DB_LIB_DIR -cp "$REQUIRED_JARS$CP_SEP$DB_JAR$CP_SEP./dbtest.jar" org.junit.runner.JUnitCore $1
+
+else
+
+ for f in `find classes -name "*Test.class"`; do
+ c=`echo "$f" | sed -e 's/classes\///' -e 's/\.class//' -e 's/\//./g'`
+ echo "Running: $c"
+ if java -Xcheck:jni -Djava.library.path=$DB_LIB_DIR -cp "$REQUIRED_JARS$CP_SEP$DB_JAR$CP_SEP./dbtest.jar" org.junit.runner.JUnitCore $c ; then
+ :
+ else
+ echo "FAIL: test program failed"
+ # exit 1
+ fi
+ done
+fi
+exit 0
diff --git a/db-4.8.30/test/scr016/makenewtest.sh b/db-4.8.30/test/scr016/makenewtest.sh
new file mode 100644
index 0000000..70c2803
--- /dev/null
+++ b/db-4.8.30/test/scr016/makenewtest.sh
@@ -0,0 +1,117 @@
+
+# A script that helps create an empty test shell
+# application.
+# The only parameter is the name of the class.
+# The output file will be in the src/com/sleepycat/db/test
+# directory.
+
+
+case $1 in
+*Test)
+;;
+*)
+ echo "New test case names must end in Test."
+ exit 1;;
+esac
+
+outdir="src/com/sleepycat/db/test"
+if [ ! -d $outdir ]
+then
+ echo "Could not find test source directory. Ensure the script is being run from the right place."
+ exit 1
+fi
+
+outname="$outdir/$1.java"
+
+if [ -f $outname ]
+then
+ echo "A test with that file name exists."
+ echo -n "Are you sure you want to overwrite the file (yes/no)? "
+ read got_ok
+ if [ $got_ok != "yes" ]
+ then
+ exit 1
+ else
+ echo "" > $outname
+ fi
+fi
+
+nameupper=`echo $1 | tr -t [:lower:] [:upper:]`
+namelower=`echo $1 | tr -t [:upper:] [:lower:]`
+
+echo "/*-" >> $outname
+echo " * See the file LICENSE for redistribution information." >> $outname
+echo " * " >> $outname
+echo " * Copyright (c) 2002-2009 Oracle. All rights reserved." >> $outname
+echo " *" >> $outname
+echo " */" >> $outname
+echo "" >> $outname
+
+echo "" >> $outname
+
+echo "package com.sleepycat.db.test;" >> $outname
+echo "" >> $outname
+echo "import org.junit.After;" >> $outname
+echo "import org.junit.AfterClass;" >> $outname
+echo "import org.junit.Before;" >> $outname
+echo "import org.junit.BeforeClass;" >> $outname
+echo "import org.junit.Test;" >> $outname
+echo "import static org.junit.Assert.assertEquals;" >> $outname
+echo "import static org.junit.Assert.fail;" >> $outname
+echo "" >> $outname
+echo "import com.sleepycat.db.*;" >> $outname
+echo "" >> $outname
+
+# some other likely ones :)
+echo "import java.io.File;" >> $outname
+echo "import java.io.FileNotFoundException;" >> $outname
+echo "import java.io.IOException;" >> $outname
+
+echo "" >> $outname
+echo "import com.sleepycat.db.test.TestUtils;" >> $outname
+
+echo "public class $1 {" >> $outname
+
+echo " public static final String ${nameupper}_DBNAME = \"${namelower}.db\";" >> $outname
+
+echo " @BeforeClass public static void ClassInit() {" >> $outname
+echo " TestUtils.loadConfig(null);" >> $outname
+echo -n " TestUtils.check_file_removed(TestUtils." >> $outname
+echo "getDBFileName(${nameupper}_DBNAME), true, true);" >> $outname
+echo -n " TestUtils.removeall(true, true, TestUtils." >> $outname
+echo "BASETEST_DBDIR, TestUtils.getDBFileName(${nameupper}_DBNAME));" >> $outname
+echo " }" >> $outname
+
+echo "" >> $outname
+echo " @AfterClass public static void ClassShutdown() {" >> $outname
+echo -n " TestUtils.check_file_removed(TestUtils." >> $outname
+echo "getDBFileName(${nameupper}_DBNAME), true, true);" >> $outname
+echo -n " TestUtils.removeall(true, true, TestUtils." >> $outname
+echo "BASETEST_DBDIR, TestUtils.getDBFileName(${nameupper}_DBNAME));" >> $outname
+echo " }" >> $outname
+
+echo "" >> $outname
+echo " @Before public void PerTestInit()" >> $outname
+echo " throws Exception {" >> $outname
+echo " }" >> $outname
+
+echo "" >> $outname
+echo " @After public void PerTestShutdown()" >> $outname
+echo " throws Exception {" >> $outname
+echo " }" >> $outname
+
+echo " /*" >> $outname
+echo " * Test case implementations." >> $outname
+echo " * To disable a test mark it with @Ignore" >> $outname
+echo " * To set a timeout(ms) notate like: @Test(timeout=1000)" >> $outname
+echo " * To indicate an expected exception notate like: $Test(expected=Exception)" >> $outname
+echo " */" >> $outname
+
+echo "" >> $outname
+echo " @Test public void test1()" >> $outname
+echo " throws DatabaseException, FileNotFoundException" >> $outname
+echo " {" >> $outname
+echo " }" >> $outname
+
+echo "}" >> $outname
+
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/AppendRecnoTest.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/AppendRecnoTest.java
new file mode 100644
index 0000000..0ed7455
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/AppendRecnoTest.java
@@ -0,0 +1,209 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ */
+
+/*
+ * Alexg 23-4-06
+ * Based on scr016 TestAppendRecno test application.
+ */
+
+package com.sleepycat.db.test;
+
+import org.junit.Before;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import com.sleepycat.db.*;
+
+import com.sleepycat.db.DatabaseException;
+
+import com.sleepycat.db.test.TestUtils;
+
+public class AppendRecnoTest implements RecordNumberAppender {
+
+ public static final String RECNOTEST_DBNAME = "appendrecnotest.db";
+
+ public static final String[] EXPECTED_ENTRIES = { "data0_xyz", "data1_xy", "ata4_xyz", "ata5_xy",
+ "abc", "", "data9_xyz" };
+
+ int callback_count = 0;
+ boolean callback_throws = false;
+
+ @BeforeClass public static void ClassInit() {
+ TestUtils.loadConfig(null);
+ TestUtils.check_file_removed(TestUtils.getDBFileName(RECNOTEST_DBNAME), true, true);
+ }
+
+ @AfterClass public static void ClassShutdown() {
+ TestUtils.check_file_removed(TestUtils.getDBFileName(RECNOTEST_DBNAME), true, true);
+ }
+
+ @Before public void PerTestInit()
+ throws Exception {
+ TestUtils.removeall(true, false, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(RECNOTEST_DBNAME));
+ }
+
+ @After public void PerTestShutdown()
+ throws Exception {
+ }
+
+ /*
+ * Test creating a new database.
+ */
+ @Test public void test1()
+ throws DatabaseException, FileNotFoundException
+ {
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ dbConfig.setErrorStream(TestUtils.getErrorStream());
+ dbConfig.setErrorPrefix("AppendRecnoTest");
+ dbConfig.setType(DatabaseType.RECNO);
+ dbConfig.setPageSize(1024);
+ dbConfig.setAllowCreate(true);
+ dbConfig.setRecordNumberAppender(this);
+
+ Database db = new Database(TestUtils.getDBFileName(RECNOTEST_DBNAME), null, dbConfig);
+
+ for (int i=0; i<10; i++) {
+ TestUtils.DEBUGOUT("\n*** Iteration " + i );
+ boolean gotExcept = false;
+ try {
+ RecnoEntry key = new RecnoEntry(77+i);
+ DatabaseEntry data = new DatabaseEntry((new String("data" + i + "_xyz")).getBytes());
+ db.append(null, key, data);
+ }
+ catch (DatabaseException dbe) {
+ gotExcept = true;
+ // Can be expected since testing throwing from the appendRecordNumber callback.
+ TestUtils.DEBUGOUT("dbe: " + dbe);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ gotExcept = true;
+ TestUtils.DEBUGOUT("ArrayIndex: " + e);
+ }
+ if((gotExcept && callback_throws == false ) || (!gotExcept && callback_throws == true))
+ TestUtils.DEBUGOUT(3, "appendRecordNumber callback exception or non-exception condition dealt with incorrectly. Case " + callback_count);
+ }
+
+ Cursor dbcp = db.openCursor(null, CursorConfig.DEFAULT);
+
+ // Walk through the table, validating the key/data pairs.
+ RecnoEntry readkey = new RecnoEntry();
+ DatabaseEntry readdata = new DatabaseEntry();
+ TestUtils.DEBUGOUT("Dbc.get");
+
+ int itemcount = 0;
+ while (dbcp.getNext(readkey, readdata, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
+ String gotString = new String(readdata.getData(), readdata.getOffset(), readdata.getSize());
+ TestUtils.DEBUGOUT(1, readkey.getRecno() + " : " + gotString);
+
+ if(readkey.getRecno() != ++itemcount)
+ TestUtils.DEBUGOUT(3, "Recno iteration out of order. key: " + readkey.getRecno() + " item: " + itemcount);
+
+ if(itemcount > EXPECTED_ENTRIES.length)
+ TestUtils.ERR("More entries in recno DB than expected.");
+
+
+ if(gotString.compareTo(EXPECTED_ENTRIES[itemcount-1]) != 0)
+ TestUtils.DEBUGOUT(3, "Recno - stored data mismatch. Expected: " + EXPECTED_ENTRIES[itemcount-1] + " received: " + gotString);
+ }
+
+ dbcp.close();
+ db.close(false);
+
+ }
+
+ public void appendRecordNumber(Database db, DatabaseEntry data, int recno)
+ throws DatabaseException
+ {
+ callback_throws = false;
+ TestUtils.DEBUGOUT("AppendRecnoTest::appendRecordNumber. data " + new String(data.getData()) + " recno: " + recno);
+
+ switch (callback_count++) {
+ case 0:
+ // nothing
+ break;
+
+ case 1:
+ data.setSize(data.getSize() - 1);
+ break;
+
+ case 2:
+ // Should result in an error.
+ callback_throws = true;
+ TestUtils.DEBUGOUT("throwing...");
+ throw new DatabaseException("appendRecordNumber thrown");
+ //not reached
+
+ case 3:
+ // Should result in an error (size unchanged).
+ callback_throws = true;
+ data.setOffset(1);
+ break;
+
+ case 4:
+ data.setOffset(1);
+ data.setSize(data.getSize() - 1);
+ break;
+
+ case 5:
+ data.setOffset(1);
+ data.setSize(data.getSize() - 2);
+ break;
+
+ case 6:
+ data.setData(new String("abc").getBytes());
+ data.setSize(3);
+ break;
+
+ case 7:
+ // Should result in an error.
+ callback_throws = true;
+ data.setData(new String("abc").getBytes());
+ data.setSize(4);
+ break;
+// TODO: Broken - does not throw an exception.
+ case 8:
+ // TODO: Should this result in an error?
+ data.setData(null);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ static class RecnoEntry extends DatabaseEntry
+ {
+ public RecnoEntry() {
+ super();
+ }
+
+ public RecnoEntry(int value)
+ {
+ setReuseBuffer(false);
+ arr = new byte[4];
+ setData(arr); // use our local array for data
+ setRecno(value);
+ }
+
+ public void setRecno(int value)
+ {
+ setRecordNumber(value);
+ }
+
+ public int getRecno()
+ {
+ return getRecordNumber();
+ }
+ byte arr[];
+ } // end of RecnoEntry sub-class.
+
+}
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/AssociateTest.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/AssociateTest.java
new file mode 100644
index 0000000..7e07cfe
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/AssociateTest.java
@@ -0,0 +1,252 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ */
+
+/*
+ * Alexg 23-4-06
+ * Based on scr016 TestAssociate test application.
+ */
+
+package com.sleepycat.db.test;
+
+import org.junit.Before;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import com.sleepycat.db.*;
+
+import com.sleepycat.db.DatabaseException;
+
+import com.sleepycat.db.test.TestUtils;
+
+public class AssociateTest {
+
+ public static final String ASSOCTEST_DBNAME = "associatetest.db";
+
+ public static final String[] DATABASETEST_SAMPLE_DATA = {"abc", "def", "ghi", "JKL", "MNO", "pqrst", "UVW", "y", "Z"};
+
+ public static Database savedPriDb = null;
+ public static Database savedSecDb = null;
+
+ int callback_count = 0;
+ boolean callback_throws = false;
+
+ @BeforeClass public static void ClassInit() {
+ TestUtils.loadConfig(null);
+ TestUtils.check_file_removed(TestUtils.getDBFileName(ASSOCTEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(ASSOCTEST_DBNAME));
+ }
+
+ @AfterClass public static void ClassShutdown() {
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(ASSOCTEST_DBNAME));
+ }
+
+ @Before public void PerTestInit()
+ throws Exception {
+ }
+
+ @After public void PerTestShutdown()
+ throws Exception {
+ }
+
+ /*
+ * Test creating a new database.
+ */
+ @Test public void test1()
+ throws DatabaseException, FileNotFoundException
+ {
+ int i;
+ EnvironmentConfig envc = new EnvironmentConfig();
+ envc.setAllowCreate(true);
+ envc.setInitializeCache(true);
+ Environment dbEnv = new Environment(TestUtils.BASETEST_DBFILE, envc);
+
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ dbConfig.setErrorStream(TestUtils.getErrorStream());
+ dbConfig.setErrorPrefix("AssociateTest");
+ dbConfig.setType(DatabaseType.BTREE);
+ dbConfig.setAllowCreate(true);
+
+ Database priDb = dbEnv.openDatabase(null, ASSOCTEST_DBNAME, null, dbConfig);
+
+ SecondaryConfig secConfig = new SecondaryConfig();
+ secConfig.setAllowCreate(true);
+ secConfig.setType(DatabaseType.BTREE);
+ secConfig.setSortedDuplicates(true);
+ secConfig.setKeyCreator(new Capitalize());
+ SecondaryDatabase secDb = dbEnv.openSecondaryDatabase(null, ASSOCTEST_DBNAME+"2", null,
+ priDb, secConfig);
+ savedPriDb = priDb;
+ savedSecDb = secDb;
+
+ // Insert records into the database
+ for(i =0; i < DATABASETEST_SAMPLE_DATA.length; i++)
+ {
+ String curdata = DATABASETEST_SAMPLE_DATA[i];
+ String reversed = (new StringBuffer(curdata)).reverse().toString();
+
+ DatabaseEntry key = new DatabaseEntry(curdata.getBytes());
+ key.setReuseBuffer(false);
+ DatabaseEntry data = new DatabaseEntry(reversed.getBytes());
+ data.setReuseBuffer(false);
+ try {
+ if (priDb.putNoOverwrite(null, key, data) == OperationStatus.KEYEXIST) {
+ // should not get here, since we control the data.
+ TestUtils.DEBUGOUT(2, "Key: " + curdata + " already exists\n");
+ }
+ } catch(DatabaseException dbe) {
+ TestUtils.ERR("Caught DatabaseException: " + dbe);
+ }
+ }
+
+ DatabaseEntry readkey = new DatabaseEntry();
+ readkey.setReuseBuffer(false);
+ DatabaseEntry readdata = new DatabaseEntry();
+ readdata.setReuseBuffer(false);
+ Cursor dbcp = priDb.openCursor(null, CursorConfig.DEFAULT);
+ while (dbcp.getNext(readkey, readdata, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
+ String keystring = new String(readkey.getData());
+ String datastring = new String(readdata.getData());
+ String expecteddata = (new StringBuffer(keystring)).reverse().toString();
+
+ boolean found = false;
+ for(i = 0; i < DATABASETEST_SAMPLE_DATA.length; i++)
+ {
+ if(DATABASETEST_SAMPLE_DATA[i].compareTo(keystring) == 0)
+ found = true;
+ }
+ if(!found)
+ TestUtils.ERR("Key: " + keystring + " retrieved from DB, but never added!");
+ if(datastring.compareTo(expecteddata) != 0)
+ TestUtils.ERR("Data: " + datastring + " does not match expected data: " + expecteddata);
+ }
+
+ // Test secondary get functionality.
+ DatabaseEntry seckey = new DatabaseEntry();
+ seckey.setReuseBuffer(false);
+ DatabaseEntry secpkey = new DatabaseEntry();
+ secpkey.setReuseBuffer(false);
+ DatabaseEntry secdata = new DatabaseEntry();
+ secdata.setReuseBuffer(false);
+
+ seckey.setData("BC".getBytes());
+ if(secDb.get(null, seckey, secdata, null) == OperationStatus.SUCCESS)
+ {
+ TestUtils.DEBUGOUT(2, "seckey: " + new String(seckey.getData()) + " secdata: " +
+ new String(secdata.getData()));
+ } else {
+ TestUtils.ERR("Secondary get of key: " + new String(seckey.getData()) + " did not succeed.");
+ }
+ // pget
+ if(secDb.get(null, seckey, secpkey, secdata, null) == OperationStatus.SUCCESS)
+ {
+ TestUtils.DEBUGOUT(2, "seckey: " + new String(seckey.getData()) + " secdata: " +
+ new String(secdata.getData()) + " pkey: " + new String(secpkey.getData()));
+
+ // ensure that the retrievals are consistent using both primary and secondary keys.
+ DatabaseEntry tmpdata = new DatabaseEntry();
+ priDb.get(null, secpkey, tmpdata, null);
+ String tmpstr = new String(tmpdata.getData());
+ if(tmpstr.compareTo(new String(secdata.getData())) != 0)
+ TestUtils.ERR("Data retrieved from matching primary secondary keys is not consistent. secdata: " + new String(secdata.getData()) +
+ " pridata: " + new String(tmpdata.getData()));
+ } else {
+ TestUtils.ERR("Secondary pget of key: " + new String(seckey.getData()) + " did not succeed.");
+ }
+ seckey.setData("KL".getBytes());
+ if(secDb.get(null, seckey, secdata, null) == OperationStatus.SUCCESS)
+ {
+ TestUtils.DEBUGOUT(2, "seckey: " + new String(seckey.getData()) + " secdata: " +
+ new String(secdata.getData()));
+ } else {
+ TestUtils.ERR("Secondary get of key: " + new String(seckey.getData()) + " did not succeed.");
+ }
+ // pget
+ if(secDb.get(null, seckey, secpkey, secdata, null) == OperationStatus.SUCCESS)
+ {
+ TestUtils.DEBUGOUT(2, "seckey: " + new String(seckey.getData()) + " secdata: " +
+ new String(secdata.getData()) + " pkey: " + new String(secpkey.getData()));
+
+ // ensure that the retrievals are consistent using both primary and secondary keys.
+ DatabaseEntry tmpdata = new DatabaseEntry();
+ priDb.get(null, secpkey, tmpdata, null);
+ String tmpstr = new String(tmpdata.getData());
+ if(tmpstr.compareTo(new String(secdata.getData())) != 0)
+ TestUtils.ERR("Data retrieved from matching primary secondary keys is not consistent. secdata: " + new String(secdata.getData()) +
+ " pridata: " + new String(tmpdata.getData()));
+ } else {
+ TestUtils.ERR("Secondary pget of key: " + new String(seckey.getData()) + " did not succeed.");
+ }
+
+ }
+
+/**************************************************************************************
+ **************************************************************************************
+ **************************************************************************************/
+ /* creates a stupid secondary index as follows:
+ For an N letter key, we use N-1 letters starting at
+ position 1. If the new letters are already capitalized,
+ we return the old array, but with offset set to 1.
+ If the letters are not capitalized, we create a new,
+ capitalized array. This is pretty stupid for
+ an application, but it tests all the paths in the runtime.
+ */
+ public static class Capitalize implements SecondaryKeyCreator
+ {
+ public boolean createSecondaryKey(SecondaryDatabase secondary,
+ DatabaseEntry key,
+ DatabaseEntry data,
+ DatabaseEntry result)
+ throws DatabaseException
+ {
+ key.setReuseBuffer(false);
+ data.setReuseBuffer(false);
+ result.setReuseBuffer(false);
+ String which = "unknown db";
+ if (savedPriDb.equals(secondary)) {
+ which = "primary";
+ }
+ else if (savedSecDb.equals(secondary)) {
+ which = "secondary";
+ }
+ String keystring = new String(key.getData());
+ String datastring = new String(data.getData());
+ TestUtils.DEBUGOUT(2, "secondaryKeyCreate, Db: " + TestUtils.shownull(secondary) + "(" + which + "), key: " + keystring + ", data: " + datastring);
+ int len = key.getSize();
+ byte[] arr = key.getData();
+ boolean capped = true;
+
+ if (len < 1)
+ throw new DatabaseException("bad key");
+
+ result.setSize(len - 1);
+ for (int i=1; capped && i<len; i++) {
+ if (!Character.isUpperCase((char)arr[i]))
+ capped = false;
+ }
+ if (capped) {
+ TestUtils.DEBUGOUT(2, " creating key(1): " + new String(arr, 1, len-1));
+ result.setData(arr);
+ result.setOffset(1);
+ result.setSize(result.getSize() -1);
+ }
+ else {
+ TestUtils.DEBUGOUT(2, " creating key(2): " + (new String(arr)).substring(1).
+ toUpperCase());
+ result.setData((new String(arr)).substring(1).
+ toUpperCase().getBytes());
+ }
+ return true;
+ }
+ }
+
+}
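
The Capitalize key creator above follows the same rule as the secondary-index callback in the original C++ test it is based on. A minimal sketch of that rule as a Db::associate() callback, written here as an assumption about the classic C++ API rather than a copy of the original file: the secondary key is the primary key minus its first byte, upper-cased, and DB_DBT_APPMALLOC hands ownership of the new buffer to the library. Returning DB_DONOTINDEX instead of an error would skip indexing the record rather than failing the operation.

#include <db_cxx.h>
#include <ctype.h>
#include <errno.h>
#include <stdlib.h>

// Build the secondary key from bytes 1..N-1 of the primary key,
// upper-cased into a freshly allocated buffer.
static int capitalize_key(Db *secondary,
    const Dbt *key, const Dbt *data, Dbt *result)
{
	u_int32_t len = key->get_size();
	if (len < 1)
		return (EINVAL);	/* mirrors the "bad key" case above */

	char *buf = (char *)malloc(len - 1);
	const char *src = (const char *)key->get_data();
	for (u_int32_t i = 1; i < len; i++)
		buf[i - 1] = (char)toupper((unsigned char)src[i]);

	result->set_data(buf);
	result->set_size(len - 1);
	result->set_flags(DB_DBT_APPMALLOC);	/* the library frees buf */
	return (0);
}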
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/CallbackTest.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/CallbackTest.java
new file mode 100644
index 0000000..420452a
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/CallbackTest.java
@@ -0,0 +1,159 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ */
+
+/*
+ * Alexg 23-4-06
+ * Based on scr016 TestCallback test application.
+ *
+ * Simple tests for DbErrorHandler, DbFeedbackHandler, DbPanicHandler
+ */
+
+package com.sleepycat.db.test;
+
+import org.junit.Before;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import com.sleepycat.db.*;
+
+import com.sleepycat.db.DatabaseException;
+
+import com.sleepycat.db.test.TestUtils;
+
+public class CallbackTest
+ implements FeedbackHandler, PanicHandler, ErrorHandler {
+
+ public static final String CALLBACKTEST_DBNAME = "callbacktest.db";
+
+ int callback_count = 0;
+ boolean callback_throws = false;
+
+ @BeforeClass public static void ClassInit() {
+ TestUtils.loadConfig(null);
+ TestUtils.check_file_removed(TestUtils.getDBFileName(CALLBACKTEST_DBNAME), true, true);
+ }
+
+ @AfterClass public static void ClassShutdown() {
+ TestUtils.check_file_removed(TestUtils.getDBFileName(CALLBACKTEST_DBNAME), true, true);
+ }
+
+ @Before public void PerTestInit()
+ throws Exception {
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(CALLBACKTEST_DBNAME));
+ }
+
+ @After public void PerTestShutdown()
+ throws Exception {
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(CALLBACKTEST_DBNAME));
+ }
+
+ /*
+ * Test creating a new database.
+ */
+ @Test public void test1()
+ throws DatabaseException, FileNotFoundException
+ {
+ TestUtils.debug_level = 2;
+ EnvironmentConfig envc = new EnvironmentConfig();
+ envc.setAllowCreate(true);
+ envc.setInitializeCache(true);
+ envc.setTransactional(true);
+ envc.setInitializeLocking(true);
+ envc.setCacheSize(64 * 1024);
+ envc.setFeedbackHandler(this);
+ envc.setPanicHandler(this);
+ envc.setErrorHandler(this);
+ Environment dbEnv = new Environment(TestUtils.BASETEST_DBFILE, envc);
+
+ // set up a transaction DB.
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ dbConfig.setType(DatabaseType.BTREE);
+ dbConfig.setAllowCreate(true);
+ Database db = dbEnv.openDatabase(null, CALLBACKTEST_DBNAME, null, dbConfig);
+
+ DatabaseEntry key1 = new DatabaseEntry("key".getBytes());
+ DatabaseEntry data1 = new DatabaseEntry("data".getBytes());
+ // populate used to do more than this (it also validated that entries that were never added could not be retrieved)
+ db.putNoOverwrite(null, key1, data1);
+// TestUtil.populate(db);
+
+ CheckpointConfig cpcfg = new CheckpointConfig();
+ cpcfg.setForce(true);
+ dbEnv.checkpoint(cpcfg);
+
+ try {
+ dbConfig.setBtreeComparator(null);
+ }
+ catch (IllegalArgumentException dbe)
+ {
+ TestUtils.DEBUGOUT(1, "got expected exception: " + dbe);
+ // ignore
+ }
+
+ /*
+ // Pretend we crashed, and reopen the environment
+ db = null;
+ dbenv = null;
+
+ dbenv = new DbEnv(0);
+ dbenv.setFeedbackHandler(this);
+ dbenv.open(".", Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL | Db.DB_INIT_LOG
+ | Db.DB_INIT_TXN | Db.DB_RECOVER, 0);
+ */
+
+ dbEnv.panic(true);
+ try {
+ DatabaseEntry key = new DatabaseEntry("foo".getBytes());
+ DatabaseEntry data = new DatabaseEntry();
+ db.get(null, key, data, null);
+ }
+ catch (DatabaseException dbe2)
+ {
+ TestUtils.DEBUGOUT(2, "got expected exception: " + dbe2);
+ // ignore
+ }
+
+ }
+
+ /*
+ * FeedbackHandler interface implementation.
+ */
+ public void recoveryFeedback(Environment dbenv, int percent)
+ {
+ TestUtils.DEBUGOUT(2, "recoveryFeedback callback invoked. percent: " + percent);
+ }
+ public void upgradeFeedback(Database db, int percent)
+ {
+ TestUtils.DEBUGOUT(2, "upgradeFeedback callback invoked. percent: " + percent);
+ }
+ public void verifyFeedback(Database db, int percent)
+ {
+ TestUtils.DEBUGOUT(2, "verifyFeedback callback invoked. percent: " + percent);
+ }
+
+ /*
+ * Panic handler interface implementation.
+ */
+ public void panic(Environment dbenv, DatabaseException e)
+ {
+ TestUtils.DEBUGOUT(2, "panic callback invoked. exception: " + e);
+ }
+
+ /*
+ * Error handler interface implementation.
+ */
+ public void error(Environment dbenv, String errpfx, String msg)
+ {
+ TestUtils.DEBUGOUT(2, "error callback invoked, errpfx: \"" + errpfx + "\", msg: \"" + msg + "\"");
+ }
+}
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/ClosedDbTest.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/ClosedDbTest.java
new file mode 100644
index 0000000..bed47e3
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/ClosedDbTest.java
@@ -0,0 +1,86 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ */
+
+package com.sleepycat.db.test;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import com.sleepycat.db.*;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import com.sleepycat.db.test.TestUtils;
+public class ClosedDbTest {
+ public static final String CLOSEDDBTEST_DBNAME = "closeddbtest.db";
+ @BeforeClass public static void ClassInit() {
+ TestUtils.loadConfig(null);
+ TestUtils.check_file_removed(TestUtils.getDBFileName(CLOSEDDBTEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(CLOSEDDBTEST_DBNAME));
+ }
+
+ @AfterClass public static void ClassShutdown() {
+ TestUtils.check_file_removed(TestUtils.getDBFileName(CLOSEDDBTEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(CLOSEDDBTEST_DBNAME));
+ }
+
+ @Before public void PerTestInit()
+ throws Exception {
+ }
+
+ @After public void PerTestShutdown()
+ throws Exception {
+ }
+ /*
+ * Test case implementations.
+ * To disable a test mark it with @Ignore
+ * To set a timeout(ms) notate like: @Test(timeout=1000)
+ * To indicate an expected exception notate like: @Test(expected=Exception)
+ */
+
+ @Test public void test1()
+ throws DatabaseException, FileNotFoundException
+ {
+ DatabaseConfig dbConf = new DatabaseConfig();
+ dbConf.setType(DatabaseType.BTREE);
+ dbConf.setAllowCreate(true);
+ Database db = new Database(TestUtils.getDBFileName(CLOSEDDBTEST_DBNAME), null, dbConf);
+
+ DatabaseEntry key = new DatabaseEntry("key".getBytes());
+ DatabaseEntry data = new DatabaseEntry("data".getBytes());
+ db.putNoOverwrite(null, key, data);
+
+ // Now, retrieve. It would be possible to reuse the
+ // same key object, but that would be atypical.
+ DatabaseEntry getkey = new DatabaseEntry("key".getBytes());
+ DatabaseEntry badgetkey = new DatabaseEntry("badkey".getBytes());
+ DatabaseEntry getdata = new DatabaseEntry();
+ getdata.setReuseBuffer(false); // TODO: is this enabling DB_DBT_MALLOC?
+
+ int ret;
+
+ // close the db - subsequent operations should fail by throwing
+ // an exception.
+ db.close();
+
+ try {
+ db.get(null, getkey, getdata, LockMode.DEFAULT);
+ fail("Database get on a closed Db should not have completed.");
+ } catch (IllegalArgumentException e) {
+ TestUtils.DEBUGOUT(1, "Got expected exception from db.get on closed database.");
+ }
+
+ }
+}
+
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/DatabaseTest.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/DatabaseTest.java
new file mode 100644
index 0000000..1f99876
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/DatabaseTest.java
@@ -0,0 +1,377 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ */
+
+/*
+ * Alexg 23-4-06
+ * Based on scr016 TestConstruct01 and TestConstruct02
+ * test applications.
+ */
+
+package com.sleepycat.db.test;
+
+import org.junit.Before;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import com.sleepycat.db.Cursor;
+import com.sleepycat.db.CursorConfig;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.DatabaseType;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+import com.sleepycat.db.LockMode;
+import com.sleepycat.db.OperationStatus;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+import com.sleepycat.db.test.TestUtils;
+
+public class DatabaseTest {
+
+ public static final String DATABASETEST_DBNAME = "databasetest.db";
+
+ private static int itemcount; // count the number of items in the database
+
+ @BeforeClass public static void ClassInit() {
+ TestUtils.loadConfig(null);
+ TestUtils.check_file_removed(TestUtils.getDBFileName(DATABASETEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(DATABASETEST_DBNAME));
+ itemcount = 0;
+ }
+
+ @AfterClass public static void ClassShutdown() {
+ TestUtils.check_file_removed(TestUtils.getDBFileName(DATABASETEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(DATABASETEST_DBNAME));
+ }
+
+ @Before public void PerTestInit()
+ throws Exception {
+ }
+
+ @After public void PerTestShutdown()
+ throws Exception {
+ }
+
+ /*
+ * Test creating a new database.
+ */
+ @Test public void test1()
+ throws DatabaseException, FileNotFoundException
+ {
+ TestOptions options = new TestOptions();
+ options.db_config.setErrorPrefix("DatabaseTest::test1 ");
+
+ rundb(itemcount++, options);
+ }
+
+ /*
+ * Test opening and adding to an existing database.
+ */
+ @Test public void test2()
+ throws DatabaseException, FileNotFoundException
+ {
+ TestOptions options = new TestOptions();
+ options.db_config.setErrorPrefix("DatabaseTest::test2 ");
+
+ rundb(itemcount++, options);
+ }
+
+ /*
+ * Test modifying the error prefix multiple times ?
+ */
+ @Test public void test3()
+ throws DatabaseException, FileNotFoundException
+ {
+ TestOptions options = new TestOptions();
+ options.db_config.setErrorPrefix("DatabaseTest::test3 ");
+
+ for (int i=0; i<100; i++)
+ options.db_config.setErrorPrefix("str" + i);
+
+ rundb(itemcount++, options);
+ }
+
+ /*
+ * Test opening a database with an env.
+ */
+ @Test public void test4()
+ throws DatabaseException, FileNotFoundException
+ {
+ TestOptions options = new TestOptions();
+ options.db_config.setErrorPrefix("DatabaseTest::test4 ");
+
+ EnvironmentConfig envc = new EnvironmentConfig();
+ envc.setAllowCreate(true);
+ envc.setInitializeCache(true);
+ options.db_env = new Environment(TestUtils.BASETEST_DBFILE, envc);
+
+ rundb(itemcount++, options);
+ options.db_env.close();
+ }
+
+ /*
+ * Test opening multiple databases using the same env.
+ */
+ @Test public void test5()
+ throws DatabaseException, FileNotFoundException
+ {
+ TestOptions options = new TestOptions();
+ options.db_config.setErrorPrefix("DatabaseTest::test5 ");
+
+ EnvironmentConfig envc = new EnvironmentConfig();
+ envc.setAllowCreate(true);
+ envc.setInitializeCache(true);
+ options.db_env = new Environment(TestUtils.BASETEST_DBFILE, envc);
+
+ rundb(itemcount++, options);
+
+ rundb(itemcount++, options);
+
+ options.db_env.close();
+ }
+
+ /*
+ * Test just opening and closing a DB and an Env without doing any operations.
+ */
+ @Test public void test6()
+ throws DatabaseException, FileNotFoundException
+ {
+ TestOptions options = new TestOptions();
+ options.db_config.setErrorPrefix("DatabaseTest::test6 ");
+
+ Database db = new Database(TestUtils.getDBFileName(DATABASETEST_DBNAME), null, options.db_config);
+
+ EnvironmentConfig envc = new EnvironmentConfig();
+ envc.setAllowCreate(true);
+ envc.setInitializeCache(true);
+ Environment db_env = new Environment(TestUtils.BASETEST_DBFILE, envc);
+
+ db.close();
+ db_env.close();
+
+ System.gc();
+ System.runFinalization();
+ }
+
+ /*
+ * test7 leaves a db and dbenv open; it should be detected.
+ */
+ /* Not sure if relevant with current API.
+ @Test public void test7()
+ throws DatabaseException, FileNotFoundException
+ {
+ TestOptions options = new TestOptions();
+ options.db_config.setErrorPrefix("DatabaseTest::test7 ");
+
+ Database db = new Database(TestUtils.getDBFileName(DATABASETEST_DBNAME), null, options.db_config);
+
+ EnvironmentConfig envc = new EnvironmentConfig();
+ envc.setAllowCreate(true);
+ envc.setInitializeCache(true);
+ Environment db_env = new Environment(TestUtils.BASETEST_DBFILE, envc);
+
+ System.gc();
+ System.runFinalization();
+ }
+ */
+
+ /*
+ * Test creating a new database.
+ */
+ @Test public void test8()
+ throws DatabaseException, FileNotFoundException
+ {
+ TestUtils.removeall(true, false, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(DATABASETEST_DBNAME));
+ itemcount = 0;
+ TestOptions options = new TestOptions();
+ options.save_db = true;
+ options.db_config.setErrorPrefix("DatabaseTest::test8 ");
+
+ EnvironmentConfig envc = new EnvironmentConfig();
+ envc.setAllowCreate(true);
+ envc.setInitializeCache(true);
+ options.db_env = new Environment(TestUtils.BASETEST_DBFILE, envc);
+
+ // stop rundb from closing the database, and pass in one created.
+ rundb(itemcount++, options);
+ rundb(itemcount++, options);
+ rundb(itemcount++, options);
+ rundb(itemcount++, options);
+ rundb(itemcount++, options);
+ rundb(itemcount++, options);
+
+ options.database.close();
+ options.database = null;
+
+ // reopen the same database.
+ rundb(itemcount++, options);
+ rundb(itemcount++, options);
+ rundb(itemcount++, options);
+ rundb(itemcount++, options);
+ rundb(itemcount++, options);
+ rundb(itemcount++, options);
+
+ options.database.close();
+ options.database = null;
+
+ }
+
+ // Check that key/data for 0 - count-1 are already present,
+ // and write a key/data for count. The key and data are
+ // both "0123...N" where N == count-1.
+ //
+ // For some reason on Windows, we need to open using the full pathname
+ // of the file when there is no environment, thus the 'has_env'
+ // variable.
+ //
+ void rundb(int count, TestOptions options)
+ throws DatabaseException, FileNotFoundException
+ {
+ String name;
+
+ Database db;
+ if(options.database == null)
+ {
+ if (options.db_env != null)
+ name = DATABASETEST_DBNAME;
+ else
+ name = TestUtils.getDBFileName(DATABASETEST_DBNAME);
+
+ if(count == 0)
+ options.db_config.setAllowCreate(true);
+
+ if(options.db_env == null)
+ db = new Database(name, null, options.db_config);
+ else
+ db = options.db_env.openDatabase(null, name, null, options.db_config);
+ } else {
+ db = options.database;
+ }
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ byte outbuf[] = new byte[count+1];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = (byte)('0' + i);
+ }
+ outbuf[i++] = (byte)'x';
+
+ DatabaseEntry key = new DatabaseEntry(outbuf, 0, i);
+ DatabaseEntry data = new DatabaseEntry(outbuf, 0, i);
+
+ TestUtils.DEBUGOUT("Put: " + (char)outbuf[0] + ": " + new String(outbuf, 0, i));
+ db.putNoOverwrite(null, key, data);
+
+ // Acquire a cursor for the table.
+ Cursor dbcp = db.openCursor(null, CursorConfig.DEFAULT);
+
+ // Walk through the table, checking
+ DatabaseEntry readkey = new DatabaseEntry();
+ DatabaseEntry readdata = new DatabaseEntry();
+ DatabaseEntry whoknows = new DatabaseEntry();
+
+ /*
+ * NOTE: Maybe want to change from user-buffer to DB buffer
+ * depending on the flag options.user_buffer (setReuseBuffer)
+ * The old version set MALLOC/REALLOC here - not sure if it is the same.
+ */
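+ /*
+ * A possible sketch for that (assuming the DatabaseEntry buffer helpers
+ * used later in this suite, setUserBuffer/setReuseBuffer; not wired up
+ * here because options.user_buffer is never set by the current tests):
+ *
+ *   if (options.user_buffer != 0) {
+ *       readdata.setData(new byte[1024]);
+ *       readdata.setUserBuffer(1024, true);   // roughly DB_DBT_USERMEM
+ *   } else {
+ *       readdata.setReuseBuffer(false);       // allocate per get, like DB_DBT_MALLOC
+ *   }
+ */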
+
+ TestUtils.DEBUGOUT("Dbc.get");
+ while (dbcp.getNext(readkey, readdata, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
+ String key_string =
+ new String(readkey.getData(), 0, readkey.getSize());
+ String data_string =
+ new String(readdata.getData(), 0, readdata.getSize());
+ TestUtils.DEBUGOUT("Got: " + key_string + ": " + data_string);
+ int len = key_string.length();
+ if (len <= 0 || key_string.charAt(len-1) != 'x') {
+ TestUtils.ERR("reread terminator is bad");
+ }
+ len--;
+ long bit = (1 << len);
+ if (len > count) {
+ TestUtils.ERR("reread length is bad: expect " + count + " got "+ len + " (" + key_string + ")" );
+ }
+ else if (!data_string.equals(key_string)) {
+ TestUtils.ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ TestUtils.ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ TestUtils.ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string.charAt(i) != ('0' + i)) {
+ System.out.print(" got " + key_string
+ + " (" + (int)key_string.charAt(i)
+ + "), wanted " + i
+ + " (" + (int)('0' + i)
+ + ") at position " + i + "\n");
+ TestUtils.ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ System.out.print(" expected more keys, bitmap is: " + expected + "\n");
+ TestUtils.ERR("missing keys in database");
+ }
+
+ dbcp.close();
+ TestUtils.DEBUGOUT("options.save_db " + options.save_db + " options.database " + options.database);
+ if(options.save_db == false)
+ db.close(false);
+ else if (options.database == null)
+ options.database = db;
+ }
+}
+
+
+class TestOptions
+{
+ int testmask = 0; // which tests to run
+ int user_buffer = 0; // use DB_DBT_USERMEM or DB_DBT_MALLOC
+ int successcounter =0;
+ boolean save_db = false;
+ Environment db_env = null;
+ DatabaseConfig db_config;
+ Database database = null; // db is saved here by rundb if save_db is true.
+
+ public TestOptions()
+ {
+ this.testmask = 0;
+ this.user_buffer = 0;
+ this.successcounter = 0;
+ this.db_env = null;
+
+ db_config = new DatabaseConfig();
+ db_config.setErrorStream(TestUtils.getErrorStream());
+ db_config.setErrorPrefix("DatabaseTest");
+ db_config.setType(DatabaseType.BTREE);
+ // We don't really care about the pagesize
+ db_config.setPageSize(1024);
+
+ }
+
+}
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/EncryptTest.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/EncryptTest.java
new file mode 100644
index 0000000..4c387cd
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/EncryptTest.java
@@ -0,0 +1,138 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ */
+
+package com.sleepycat.db.test;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import com.sleepycat.db.*;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+
+import com.sleepycat.db.test.TestUtils;
+public class EncryptTest {
+ public static final String ENCRYPTTEST_DBNAME = "encrypttest.db";
+ @BeforeClass public static void ClassInit() {
+ TestUtils.loadConfig(null);
+ TestUtils.check_file_removed(TestUtils.getDBFileName(ENCRYPTTEST_DBNAME), true, true);
+ }
+
+ @AfterClass public static void ClassShutdown() {
+ TestUtils.check_file_removed(TestUtils.getDBFileName(ENCRYPTTEST_DBNAME), true, true);
+ }
+
+ @Before public void PerTestInit()
+ throws Exception {
+ TestUtils.check_file_removed(TestUtils.getDBFileName(ENCRYPTTEST_DBNAME), true, true);
+ }
+
+ @After() public void PerTestShutdown()
+ throws Exception {
+ TestUtils.check_file_removed(TestUtils.getDBFileName(ENCRYPTTEST_DBNAME), true, true);
+ }
+ /*
+ * Test case implementations.
+ * To disable a test mark it with @Ignore
+ * To set a timeout(ms) notate like: @Test(timeout=1000)
+ * To indicate an expected exception notate like: (expected=Exception)
+ */
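+ /*
+ * For example (hypothetical, not one of the tests in this suite):
+ *   @Ignore @Test(timeout=1000, expected=DatabaseException.class)
+ *   public void notYetWorkingCase() throws DatabaseException { ... }
+ */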
+
+ /*
+ * Test the basic db no env and encryption disabled.
+ */
+ @Test public void test1()
+ throws DatabaseException, FileNotFoundException, UnsupportedEncodingException
+ {
+ DatabaseConfig dbConf = new DatabaseConfig();
+ dbConf.setType(DatabaseType.BTREE);
+ dbConf.setAllowCreate(true);
+
+ Database db = new Database(TestUtils.getDBFileName(ENCRYPTTEST_DBNAME), null, dbConf);
+
+ DatabaseEntry key = new DatabaseEntry("key".getBytes("UTF-8"));
+ DatabaseEntry data = new DatabaseEntry("data".getBytes("UTF-8"));
+
+ db.put(null, key, data);
+
+ db.close();
+ //try { Thread.sleep(10000); } catch(InterruptedException e) {}
+
+ if(!findData("key"))
+ fail("Did not find the un-encrypted value in the database file after close");
+ }
+
+ /*
+ * Test database with encryption, no env.
+ */
+ @Test public void test2()
+ throws DatabaseException, FileNotFoundException, UnsupportedEncodingException
+ {
+ DatabaseConfig dbConf = new DatabaseConfig();
+ dbConf.setType(DatabaseType.BTREE);
+ dbConf.setAllowCreate(true);
+ dbConf.setEncrypted("password");
+ dbConf.setErrorStream(System.err);
+
+ Database db = new Database(TestUtils.getDBFileName(ENCRYPTTEST_DBNAME), null, dbConf);
+
+ DatabaseEntry key = new DatabaseEntry("key".getBytes("UTF-8"));
+ DatabaseEntry data = new DatabaseEntry("data".getBytes("UTF-8"));
+
+ db.put(null, key, data);
+
+ db.sync();
+ db.close();
+
+ if (findData("key"))
+ fail("Found the un-encrypted value in an encrypted database file after close");
+ }
+
+ private boolean findData(String toFind)
+ {
+ boolean found = false;
+ try {
+ String dbname = TestUtils.getDBFileName(ENCRYPTTEST_DBNAME);
+ File f = new File(dbname);
+ if (!f.exists() || f.isDirectory()) {
+ TestUtils.ERR("Could not find database file: " + dbname + " to look for encrypted string.");
+ return false;
+ }
+ FileInputStream fin = new FileInputStream(f);
+ byte[] buf = new byte[(int)f.length()];
+ fin.read(buf, 0, (int)f.length());
+ fin.close();
+
+ TestUtils.DEBUGOUT(1, "EncryptTest findData file length: " + buf.length);
+ byte firstbyte = (toFind.getBytes("UTF-8"))[0];
+ // buf can contain non-ascii characters, so no easy string search
+ for (int i = 0; i < buf.length - toFind.length(); i++)
+ {
+ if (buf[i] == firstbyte)
+ {
+ if(toFind.compareTo(new String(buf, i, toFind.length())) == 0)
+ {
+ found = true;
+ break;
+ }
+ }
+ }
+ } catch(Exception e) {
+ TestUtils.DEBUGOUT(1, "EncryptTest.findData caught: " + e);
+ }
+ return found;
+ }
+}
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/HashCompareTest.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/HashCompareTest.java
new file mode 100644
index 0000000..18a8501
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/HashCompareTest.java
@@ -0,0 +1,125 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ */
+
+
+package com.sleepycat.db.test;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import com.sleepycat.db.*;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import com.sleepycat.db.test.TestUtils;
+public class HashCompareTest {
+ public static final String HASHCOMPARETEST_DBNAME = "hashcomparetest.db";
+ @BeforeClass public static void ClassInit() {
+ TestUtils.loadConfig(null);
+ TestUtils.check_file_removed(TestUtils.getDBFileName(HASHCOMPARETEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(HASHCOMPARETEST_DBNAME));
+ }
+
+ @AfterClass public static void ClassShutdown() {
+ TestUtils.check_file_removed(TestUtils.getDBFileName(HASHCOMPARETEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(HASHCOMPARETEST_DBNAME));
+ }
+
+ @Before public void PerTestInit()
+ throws Exception {
+ TestUtils.check_file_removed(TestUtils.getDBFileName(HASHCOMPARETEST_DBNAME), true, true);
+ java.io.File dbfile = new File(HASHCOMPARETEST_DBNAME);
+ dbfile.delete();
+ }
+
+ @After public void PerTestShutdown()
+ throws Exception {
+ }
+ /*
+ * Test case implementations.
+ * To disable a test mark it with @Ignore
+ * To set a timeout(ms) notate like: @Test(timeout=1000)
+ * To indicate an expected exception notate like: (expected=Exception)
+ */
+
+ @Test public void test1()
+ throws DatabaseException, FileNotFoundException
+ {
+ runTest(DatabaseType.HASH);
+ }
+
+ @Test public void test2()
+ throws DatabaseException, FileNotFoundException
+ {
+ runTest(DatabaseType.BTREE);
+ }
+
+ public void runTest(DatabaseType type)
+ throws DatabaseException, FileNotFoundException
+ {
+ int i;
+ DatabaseConfig conf = new DatabaseConfig();
+ conf.setErrorStream(TestUtils.getErrorStream());
+ conf.setErrorPrefix("HashCompareTest");
+ conf.setType(type);
+ if (type == DatabaseType.HASH) {
+ conf.setHashComparator(new HashComparator());
+ } else
+ conf.setBtreeComparator(new BtreeComparator());
+ conf.setAllowCreate(true);
+
+ Database db = new Database(HASHCOMPARETEST_DBNAME, null, conf);
+
+ DatabaseEntry key = new DatabaseEntry();
+ DatabaseEntry data = new DatabaseEntry("world".getBytes());
+ for (i = 0; i < 100; i++) {
+ key.setData((new String("key"+i)).getBytes());
+ db.put(null, key, data);
+ }
+ i = 0;
+ Cursor dbc;
+ dbc = db.openCursor(null, CursorConfig.DEFAULT);
+ while (dbc.getNext(key, data, LockMode.DEFAULT) ==
+ OperationStatus.SUCCESS) {
+ ++i;
+ }
+// System.out.println("retrieved " + i + " entries");
+ dbc.close();
+ db.close();
+
+ }
+}
+
+class HashComparator implements java.util.Comparator
+{
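+ // The comparator receives the raw key bytes; it decodes them with the
+ // platform default charset and orders them case-insensitively, so keys
+ // differing only in case collate together.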
+ public int compare(Object o1, Object o2) {
+// System.out.println("Comparing: " + o1 + ":"+o2);
+ String s1, s2;
+ s1 = new String((byte[])o1);
+ s2 = new String((byte[])o2);
+ return s1.compareToIgnoreCase(s2);
+ }
+}
+
+class BtreeComparator implements java.util.Comparator
+{
+ public int compare(Object o1, Object o2) {
+ //System.out.println("Comparing: " + o1 + ":"+o2);
+ String s1, s2;
+ s1 = new String((byte[])o1);
+ s2 = new String((byte[])o2);
+ return s1.compareToIgnoreCase(s2);
+ }
+}
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/LogCursorTest.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/LogCursorTest.java
new file mode 100644
index 0000000..ad6e624
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/LogCursorTest.java
@@ -0,0 +1,101 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ */
+
+
+package com.sleepycat.db.test;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import com.sleepycat.db.*;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import com.sleepycat.db.test.TestUtils;
+public class LogCursorTest {
+ public static final String LOGCURSORTEST_DBNAME = "logcursortest.db";
+ @BeforeClass public static void ClassInit() {
+ TestUtils.loadConfig(null);
+ TestUtils.check_file_removed(TestUtils.getDBFileName(LOGCURSORTEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(LOGCURSORTEST_DBNAME));
+ }
+
+ @AfterClass public static void ClassShutdown() {
+ TestUtils.check_file_removed(TestUtils.getDBFileName(LOGCURSORTEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(LOGCURSORTEST_DBNAME));
+ }
+
+ @Before public void PerTestInit()
+ throws Exception {
+ }
+
+ @After public void PerTestShutdown()
+ throws Exception {
+ }
+ /*
+ * Test case implementations.
+ * To disable a test mark it with @Ignore
+ * To set a timeout(ms) notate like: @Test(timeout=1000)
+ * To indicate an expected exception notate like: (expected=Exception)
+ */
+
+ @Test public void test1()
+ throws DatabaseException, FileNotFoundException
+ {
+ Environment env;
+ EnvironmentConfig envCfg;
+ Database db;
+ DatabaseConfig cfg;
+ File home;
+ int key_count = 50, lc_count = 0;
+
+ envCfg = new EnvironmentConfig();
+ envCfg.setAllowCreate(true);
+ envCfg.setInitializeCache(true);
+ envCfg.setInitializeLocking(true);
+ envCfg.setInitializeLogging(true);
+ envCfg.setMaxLogFileSize(32768);
+ envCfg.setTransactional(true);
+
+ env = new Environment(TestUtils.BASETEST_DBFILE, envCfg);
+
+ cfg = new DatabaseConfig();
+ cfg.setAllowCreate(true);
+ cfg.setType(DatabaseType.BTREE);
+ cfg.setTransactional(true);
+ db = env.openDatabase(null, LOGCURSORTEST_DBNAME, null, cfg);
+
+ for (int i =0; i < key_count; i++) {
+ DatabaseEntry key = new DatabaseEntry();
+ key.setData(String.valueOf(i).getBytes());
+ DatabaseEntry data =new DatabaseEntry();
+ data.setData(String.valueOf(500-i).getBytes());
+ db.put(null, key, data);
+ }
+
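+ // Walk every log record from the beginning of the log; the
+ // transactional puts above mean lc_count should end up larger
+ // than key_count (checked below).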
+ LogCursor lc = env.openLogCursor();
+ LogSequenceNumber lsn = new LogSequenceNumber();
+ DatabaseEntry dbt = new DatabaseEntry();
+ while (lc.getNext(lsn, dbt) == OperationStatus.SUCCESS)
+ lc_count++;
+ lc.close();
+ db.close();
+ env.close();
+ // There should be more log records than keys inserted, since
+ // each transactional put generates at least one log record.
+ assertTrue(lc_count > key_count);
+
+ }
+}
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/MultipleCursorTest.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/MultipleCursorTest.java
new file mode 100644
index 0000000..6b95457
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/MultipleCursorTest.java
@@ -0,0 +1,239 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ */
+
+
+package com.sleepycat.db.test;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import com.sleepycat.db.*;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import com.sleepycat.db.test.TestUtils;
+
+public class MultipleCursorTest {
+ public static final String MULTIPLECURSORTEST_DBNAME = "multiplecursortest.db";
+
+ /* The data used by this test. */
+ private static final String[] Key_Strings = {
+ "abc",
+ "def",
+ "ghi",
+ "jkl",
+ "mno",
+ "pqr",
+ "stu",
+ "vwx",
+ "yza",
+ "bcd",
+ "efg",
+ "hij",
+ "klm",
+ "nop",
+ "qrs",
+ "tuv",
+ "wxy",
+ };
+ private static boolean verbose = false;
+ @BeforeClass public static void ClassInit() {
+ TestUtils.loadConfig(null);
+ TestUtils.check_file_removed(TestUtils.getDBFileName(MULTIPLECURSORTEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(MULTIPLECURSORTEST_DBNAME));
+ }
+
+ @AfterClass public static void ClassShutdown() {
+ TestUtils.check_file_removed(TestUtils.getDBFileName(MULTIPLECURSORTEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(MULTIPLECURSORTEST_DBNAME));
+ }
+
+ @Before public void PerTestInit()
+ throws Exception {
+ }
+
+ @After public void PerTestShutdown()
+ throws Exception {
+ }
+ public static void main(String []argv) {
+ verbose = true;
+ if (argv.length > 0 && argv[0].equals("-s")) {
+ try {
+ java.lang.Thread.sleep(15*1000);
+ } catch (InterruptedException e) {
+ }
+ }
+ try {
+ MultipleCursorTest mpt = new MultipleCursorTest();
+ mpt.testMultiplePut();
+ mpt.testMultipleDelete();
+ } catch (DatabaseException dbe) {
+ System.out.println("MultipleCursorTest threw DatabaseException");
+ } catch (FileNotFoundException fnfe) {
+ System.out.println("MultipleCursorTest threw FileNotFound");
+ }
+ }
+ /*
+ * Test case implementations.
+ * To disable a test mark it with @Ignore
+ * To set a timeout(ms) notate like: @Test(timeout=1000)
+ * To indicate an expected exception notate like: (expected=Exception)
+ */
+
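+ /*
+ * testMultiplePut exercises three insertion paths: a packed
+ * MultipleKeyDataEntry (keys and data interleaved in one buffer),
+ * parallel MultipleDataEntry buffers for keys and data, and a
+ * bulk-configured cursor fed one key/data pair at a time.
+ */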
+ @Test public void testMultiplePut()
+ throws DatabaseException, FileNotFoundException
+ {
+ Database db = createDatabase();
+ byte [] buffer = new byte[1024];
+ byte [] buffer2 = new byte[1024];
+ int i;
+
+ /* Build up a bulk key/data pair. */
+ MultipleKeyDataEntry kd = new MultipleKeyDataEntry(buffer);
+ DatabaseEntry key = new DatabaseEntry();
+ DatabaseEntry data = new DatabaseEntry();
+ /* Put 3 in the first round. */
+ for (i = 0; i < 3; i++) {
+ key.setData(Key_Strings[i].getBytes());
+ data.setData(Key_Strings[i].getBytes());
+ kd.append(key, data);
+ }
+ if (verbose)
+ System.out.println("Built up a multi-key/data buffer.");
+ db.putMultipleKey(null, kd, false);
+ if (verbose)
+ System.out.println("Put a multi-key/data buffer.");
+
+ /* Build up separate bulk key/data DatabaseEntries */
+ MultipleDataEntry keys = new MultipleDataEntry(buffer);
+ MultipleDataEntry datas = new MultipleDataEntry(buffer2);
+ /* Put 3 in the second round. */
+ for (; i < 6; i++) {
+ key.setData(Key_Strings[i].getBytes());
+ data.setData(Key_Strings[i].getBytes());
+ keys.append(key);
+ datas.append(data);
+ }
+ if (verbose)
+ System.out.println("Built up multi-key and data buffers.");
+ db.putMultiple(null, keys, datas, false);
+ if (verbose)
+ System.out.println("Put multi-key and data buffers.");
+
+ // Bulk cursor, adding single items.
+ Cursor dbc = db.openCursor(null, CursorConfig.BULK_CURSOR);
+ for (; i < 12; i++) {
+ key.setData(Key_Strings[i].getBytes());
+ data.setData(Key_Strings[i].getBytes());
+ dbc.put(key, data);
+ }
+ dbc.close();
+
+ if (verbose)
+ dumpDatabase(db);
+ }
+ @Test public void testMultipleDelete()
+ throws DatabaseException, FileNotFoundException
+ {
+ byte [] buffer = new byte[1024];
+ int i;
+ Database db = createDatabase();
+ populateDatabase(db, 0);
+
+ /* Build up separate bulk key/data DatabaseEntries */
+ MultipleDataEntry keys = new MultipleDataEntry(buffer);
+ DatabaseEntry key = new DatabaseEntry();
+ /* Bulk-delete the first six keys in a single call. */
+ for (i = 0; i < 6; i++) {
+ key.setData(Key_Strings[i].getBytes());
+ keys.append(key);
+ }
+ db.deleteMultiple(null, keys);
+ // Bulk cursor, deleting single items.
+ DatabaseEntry data = new DatabaseEntry();
+ Cursor dbc = db.openCursor(null, CursorConfig.BULK_CURSOR);
+ for (; i < 12; i++) {
+ key.setData(Key_Strings[i].getBytes());
+ dbc.getSearchKey(key, data, LockMode.DEFAULT);
+ dbc.delete();
+ }
+ dbc.close();
+
+ // Should have 5 entries left (17 keys inserted, 12 deleted).
+ if (verbose)
+ dumpDatabase(db);
+ }
+
+ /* Not implemented yet.
+ @Test public void testMultipleGet()
+ throws DatabaseException, FileNotFoundException
+ {
+ Database db = createDatabase();
+ populateDatabase(db, 0);
+ }
+ */
+
+ private Database createDatabase()
+ throws DatabaseException, FileNotFoundException
+ {
+ /* Create database. */
+ Database db;
+ DatabaseConfig db_config = new DatabaseConfig();
+ String name = TestUtils.getDBFileName(MULTIPLECURSORTEST_DBNAME);
+
+ db_config.setAllowCreate(true);
+ db_config.setType(DatabaseType.BTREE);
+ db_config.setSortedDuplicates(true);
+
+ db = new Database(name, null, db_config);
+ return db;
+ }
+
+ private void populateDatabase(Database db, int duplicates)
+ throws DatabaseException, FileNotFoundException
+ {
+ DatabaseEntry key = new DatabaseEntry();
+ DatabaseEntry data = new DatabaseEntry();
+ for (int i = 0; i < Key_Strings.length; i++) {
+ String datastr = new Integer(i).toString() + Key_Strings[i] + Key_Strings[i];
+ key.setData(Key_Strings[i].getBytes());
+ data.setData(datastr.getBytes());
+ db.put(null, key, data);
+ for (int j = 0; j < duplicates; j++) {
+ datastr = new Integer(j).toString() + datastr + Key_Strings[i];
+ data.setData(datastr.getBytes());
+ db.put(null, key, data);
+
+ }
+ }
+ }
+
+ private void dumpDatabase(Database db) {
+ try {
+ Cursor dbc = db.openCursor(null, CursorConfig.DEFAULT);
+ DatabaseEntry key = new DatabaseEntry();
+ DatabaseEntry data = new DatabaseEntry();
+
+ System.out.println("Dumping database contents:");
+ while (dbc.getNext(key, data, LockMode.DEFAULT) != OperationStatus.NOTFOUND) {
+ System.out.println("\tGot key : " + new String(key.getData()));
+ System.out.println("\t data: " + new String(data.getData()));
+ }
+ System.out.println("Finished dumping database contents.");
+ } catch (DatabaseException dbe) {
+ System.err.println("dumpDatabase caught an exception.");
+ }
+ }
+
+}
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/PartialGetTest.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/PartialGetTest.java
new file mode 100644
index 0000000..76ac0c1
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/PartialGetTest.java
@@ -0,0 +1,264 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ */
+
+package com.sleepycat.db.test;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import com.sleepycat.db.*;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import com.sleepycat.db.test.TestUtils;
+public class PartialGetTest {
+ public static final String PARTIALGETTEST_DBNAME = "partialgettest.db";
+ public static final byte[] data_64chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890<>".getBytes();
+ @BeforeClass public static void ClassInit() {
+ TestUtils.loadConfig(null);
+ TestUtils.check_file_removed(TestUtils.getDBFileName(PARTIALGETTEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(PARTIALGETTEST_DBNAME));
+ }
+
+ @AfterClass public static void ClassShutdown() {
+ TestUtils.check_file_removed(TestUtils.getDBFileName(PARTIALGETTEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(PARTIALGETTEST_DBNAME));
+ }
+
+ @Before public void PerTestInit()
+ throws Exception {
+ }
+
+ @After public void PerTestShutdown()
+ throws Exception {
+ TestUtils.check_file_removed(TestUtils.getDBFileName(PARTIALGETTEST_DBNAME), true, true);
+ }
+ /*
+ * Test case implementations.
+ * To disable a test mark it with @Ignore
+ * To set a timeout(ms) notate like: @Test(timeout=1000)
+ * To indicate an expected exception notate like: (expected=Exception)
+ */
+
+ /*
+ * Simple partial gets on a record which is on a single page.
+ */
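+ // DatabaseEntry.setPartial(offset, length, true) makes get() return
+ // only 'length' bytes of the stored data starting at 'offset'; the
+ // tests below read a 64-byte record back in 12-byte slices that way.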
+ @Test public void test1()
+ throws DatabaseException, FileNotFoundException
+ {
+ DatabaseEntry key = new DatabaseEntry("key".getBytes());
+ Database db = setupDb1(key, data_64chars);
+
+ StringEntry partialData = new StringEntry();
+ partialData.setPartial(true);
+ partialData.setPartial(0, 12, true);
+
+ if (db.get(null, key, partialData, LockMode.DEFAULT) !=
+ OperationStatus.SUCCESS)
+ fail("Failed doing partial retrieval, first part of entry on single page.");
+ // Validate the data.
+ if (!MatchData(data_64chars, partialData.getString(), 12))
+ fail("Data mismatch from partial get.");
+
+ partialData.setPartial(12, 12, true);
+ if (db.get(null, key, partialData, LockMode.DEFAULT) !=
+ OperationStatus.SUCCESS)
+ fail("Failed doing partial retrieval, second part of entry on single page.");
+ // Validate the data.
+ if (!MatchData(new String(data_64chars, 12, 12), partialData.getString(), 12))
+ fail("Data mismatch from partial get.");
+
+ db.close(false);
+ }
+
+ /*
+ * Retrieve the entry using different DB_DBT allocation flags.
+ * Verify results.
+ */
+ @Test public void test2()
+ throws DatabaseException, FileNotFoundException
+ {
+ DatabaseEntry key = new DatabaseEntry("key".getBytes());
+ Database db = setupDb1(key, data_64chars);
+ StringEntry partialData = new StringEntry();
+ partialData.setPartial(true);
+ partialData.setPartial(0, 12, true);
+
+ if (db.get(null, key, partialData, LockMode.DEFAULT) !=
+ OperationStatus.SUCCESS)
+ fail("Failed doing partial retrieval.");
+ // Validate the data.
+ if (!MatchData(data_64chars, partialData.getString(), 12))
+ fail("Data mismatch from partial get.");
+
+ partialData.setReuseBuffer(true);
+ if (db.get(null, key, partialData, LockMode.DEFAULT) !=
+ OperationStatus.SUCCESS)
+ fail("Failed doing partial retrieval with a reused buffer.");
+ if (!MatchData(data_64chars, partialData.getString(), 12))
+ fail("Data mismatch from partial get.");
+
+ partialData.setReuseBuffer(false);
+ partialData.setUserBuffer(64, true);
+ partialData.setData(new byte[64]);
+ if (db.get(null, key, partialData, LockMode.DEFAULT) !=
+ OperationStatus.SUCCESS)
+ fail("Failed doing partial retrieval into a user-supplied buffer.");
+ if (!MatchData(data_64chars, partialData.getString(), 12))
+ fail("Data mismatch from partial get.");
+
+ partialData.setPartial(12, 12, true);
+ if (db.get(null, key, partialData, LockMode.DEFAULT) !=
+ OperationStatus.SUCCESS)
+ fail("Failed doing partial retrieval.");
+ // Validate the data.
+ if (!MatchData(new String(data_64chars, 12, 12), partialData.getString(), 12))
+ fail("Data mismatch from partial get.");
+
+ db.close(false);
+ }
+
+ /* Retrieve entry that spans multiple pages. */
+
+ @Test public void test3()
+ throws DatabaseException, FileNotFoundException
+ {
+ DatabaseEntry key = new DatabaseEntry("key".getBytes());
+ StringBuffer sb = new StringBuffer(1024*100);
+ for(int i = 0; i < 1024; i++) {
+ sb.append("abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()_+-=");
+ sb.append("abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()_+-=");
+ }
+ Database db = setupDb1(key, sb.toString().getBytes());
+
+ StringEntry partialData = new StringEntry();
+ partialData.setPartial(true);
+ partialData.setPartial(0, 12, true);
+
+ if (db.get(null, key, partialData, LockMode.DEFAULT) !=
+ OperationStatus.SUCCESS)
+ fail("Failed doing partial retrieval.");
+ // Validate the data.
+ if (!MatchData(data_64chars, partialData.getString(), 12))
+ fail("Data mismatch from partial get.");
+
+ // retrieve a chunk larger than a page size, starting at offset 0.
+ partialData.setPartial(0, 2048, true);
+ if (db.get(null, key, partialData, LockMode.DEFAULT) !=
+ OperationStatus.SUCCESS)
+ fail("Failed doing partial retrieval.");
+ // Validate the data.
+ if (!MatchData(sb.substring(0, 2048), partialData.getString(), 2048))
+ fail("Data mismatch from partial get.");
+
+ // retrieve a chunk larger than a page size, starting at offset greater than 0.
+ partialData.setPartial(10, 2048, true);
+ if (db.get(null, key, partialData, LockMode.DEFAULT) !=
+ OperationStatus.SUCCESS)
+ fail("Failed doing partial retrieval.");
+ // Validate the data.
+ if (!MatchData(sb.substring(10, 2048+10), partialData.getString(), 2048))
+ fail("Data mismatch from partial get.");
+
+ db.close(false);
+ }
+
+ /*
+ * Test partial retrieval using a cursor.
+ */
+ @Test public void test4()
+ throws DatabaseException, FileNotFoundException
+ {
+ }
+
+ /*
+ * Test partial retrieval using different DB types.
+ */
+ @Test public void test5()
+ throws DatabaseException, FileNotFoundException
+ {
+ }
+
+ /*
+ * Test partial retrieval.
+ */
+ @Test public void test6()
+ throws DatabaseException, FileNotFoundException
+ {
+ }
+
+ /*
+ * Helper methods and classes follow.
+ */
+
+ private Database setupDb1(DatabaseEntry key, byte[] dataData)
+ throws DatabaseException, FileNotFoundException
+ {
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ dbConfig.setErrorStream(TestUtils.getErrorStream());
+ dbConfig.setErrorPrefix("PartialGetTest");
+ dbConfig.setType(DatabaseType.BTREE);
+ dbConfig.setPageSize(1024);
+ dbConfig.setAllowCreate(true);
+ Database db = new Database(TestUtils.getDBFileName(PARTIALGETTEST_DBNAME), null, dbConfig);
+
+ DatabaseEntry data = new DatabaseEntry(dataData);
+
+ if(db.putNoOverwrite(null, key, data) != OperationStatus.SUCCESS)
+ TestUtils.ERR("Failed to create standard entry in database.");
+
+ return db;
+ }
+
+ /* Overloads that convert byte[] arguments to String so callers are saved the conversion when comparing data. */
+ private boolean MatchData(byte[] data1, byte[] data2, int len)
+ {
+ return MatchData(new String(data1), new String(data2), len);
+ }
+ private boolean MatchData(String data1, byte[] data2, int len)
+ {
+ return MatchData(data1, new String(data2), len);
+ }
+ private boolean MatchData(byte[] data1, String data2, int len)
+ {
+ return MatchData(new String(data1), data2, len);
+ }
+ private boolean MatchData(String data1, String data2, int len)
+ {
+ if(data1.length() < len || data2.length() < len)
+ return false;
+ TestUtils.DEBUGOUT(0, "data1: " +data1.substring(0, 12));
+ TestUtils.DEBUGOUT(0, "data2: " +data2.substring(0, 12));
+ return data1.regionMatches(0, data2, 0, len);
+ }
+
+ static /*inner*/
+ class StringEntry extends DatabaseEntry {
+ StringEntry() {
+ }
+
+ StringEntry (String value) {
+ setString(value);
+ }
+
+ void setString(String value) {
+ byte[] data = value.getBytes();
+ setData(data);
+ setSize(data.length);
+ }
+
+ String getString() {
+ return new String(getData(), getOffset(), getSize());
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/RepmgrConfigTest.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/RepmgrConfigTest.java
new file mode 100644
index 0000000..3cf8b99
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/RepmgrConfigTest.java
@@ -0,0 +1,356 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ */
+
+
+/*
+ * A test case that exercises replication manager configuration:
+ * each variation brings up the replication manager as master with
+ * a different combination of options and then shuts it down cleanly.
+ * This case does not have any replication clients
+ * or even update the underlying DB.
+ */
+
+package com.sleepycat.db.test;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Ignore;
+import org.junit.Test;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import junit.framework.JUnit4TestAdapter;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
+import com.sleepycat.db.*;
+
+public class RepmgrConfigTest extends EventHandlerAdapter
+{
+ private class ConfigOptions {
+ public ConfigOptions(
+ boolean txnSync,
+ boolean initializeReplication,
+ boolean verboseReplication,
+ boolean setLocalSiteEnable,
+ boolean setReplicationPriority,
+ int replicationPriority,
+ boolean setValidEventHandler,
+ boolean setAckPolicy,
+ ReplicationManagerAckPolicy ackPolicyToSet,
+ int nsitesToStart,
+ boolean validStartPolicy,
+ int requestMin,
+ int requestMax,
+ boolean validPolicy
+ )
+ {
+ this.txnSync = txnSync;
+ this.initializeReplication = initializeReplication;
+ this.verboseReplication = verboseReplication;
+ this.setLocalSiteEnable = setLocalSiteEnable;
+ this.setReplicationPriority = setReplicationPriority;
+ this.replicationPriority = replicationPriority;
+ this.setValidEventHandler = setValidEventHandler;
+ this.setAckPolicy = setAckPolicy;
+ this.ackPolicyToSet = ackPolicyToSet;
+ this.nsitesToStart = nsitesToStart;
+ this.validStartPolicy = validStartPolicy;
+ this.validPolicy = validPolicy;
+ this.requestMin = requestMin;
+ this.requestMax = requestMax;
+ }
+
+ boolean txnSync;
+ boolean initializeReplication;
+ boolean verboseReplication;
+ boolean setLocalSiteEnable;
+ boolean setValidEventHandler;
+ boolean setReplicationPriority;
+ int replicationPriority;
+ boolean setAckPolicy;
+ ReplicationManagerAckPolicy ackPolicyToSet;
+ int nsitesToStart;
+ boolean validStartPolicy;
+ int requestMin;
+ int requestMax;
+
+ // should this set of options work or not?
+ boolean validPolicy;
+ }
+ static String address = "localhost";
+ static int port = 4242;
+ static int priority = 100;
+ static String homedirName = "";
+
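+ // Constructor argument order for each row: txnSync, initializeReplication,
+ // verboseReplication, setLocalSiteEnable, setReplicationPriority,
+ // replicationPriority, setValidEventHandler, setAckPolicy, ackPolicyToSet,
+ // nsitesToStart, validStartPolicy, requestMin, requestMax, validPolicy
+ // (whether the combination is expected to work).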
+ ConfigOptions[] optionVariations =
+ { new ConfigOptions(true, true, false, true, true, 50, true, true, ReplicationManagerAckPolicy.ALL, 1, true, 3, 9, true ), //0:
+ new ConfigOptions(false, true, false, true, true, 50, true, true, ReplicationManagerAckPolicy.ALL, 1, true, 3, 9, true ), //1: disable txnSync
+ new ConfigOptions(true, false, false, true, true, 50, true, true, ReplicationManagerAckPolicy.ALL, 1, true, 3, 9, false ), //2: don't call initRep
+ new ConfigOptions(true, true, true, true, true, 50, true, true, ReplicationManagerAckPolicy.ALL, 1, true, 3, 9, true ), //3: enable verbose rep
+ new ConfigOptions(true, true, false, false, true, 50, true, true, ReplicationManagerAckPolicy.ALL, 1, true, 3, 9, false ), //4: don't set a local site
+ new ConfigOptions(true, true, false, true, false, 50, true, true, ReplicationManagerAckPolicy.ALL, 1, true, 3, 9, true ), //5: don't assign priority explicitly
+ new ConfigOptions(true, true, false, true, true, -1, true, true, ReplicationManagerAckPolicy.ALL, 1, true, 3, 9, true ), //6: ??assign an invalid priority.
+ new ConfigOptions(true, true, false, true, true, 50, false, true, ReplicationManagerAckPolicy.ALL, 1, true, 3, 9, false ), //7: don't set the event handler.
+ new ConfigOptions(true, true, false, true, true, 50, true, false, ReplicationManagerAckPolicy.ALL, 1, true, 3, 9, true ), //8: ?? don't set ack policy
+ new ConfigOptions(true, true, false, true, true, 50, true, true, ReplicationManagerAckPolicy.ALL_PEERS, 1, true, 3, 9, true ), //9:
+ new ConfigOptions(true, true, false, true, true, 50, true, true, ReplicationManagerAckPolicy.NONE, 1, true, 3, 9, true ), //10:
+ new ConfigOptions(true, true, false, true, true, 50, true, true, ReplicationManagerAckPolicy.ONE, 1, true, 3, 9, true ), //11:
+ new ConfigOptions(true, true, false, true, true, 50, true, true, ReplicationManagerAckPolicy.ONE_PEER, 1, true, 3, 9, true ), //12:
+ new ConfigOptions(true, true, false, true, true, 50, true, true, null, 1, true, 3, 9, false ), //13: set an invalid ack policy
+ new ConfigOptions(true, true, false, true, true, 50, true, true, ReplicationManagerAckPolicy.ALL, -1, true, 3, 9, false ), //14: set nsites negative
+ new ConfigOptions(true, true, false, true, true, 50, true, true, ReplicationManagerAckPolicy.ALL, 0, true, 3, 9, false ), //15:
+ new ConfigOptions(true, true, false, true, true, 50, true, true, ReplicationManagerAckPolicy.ALL, 1, false, 3, 9, false ), //16: don't set a valid start policy.
+ new ConfigOptions(true, true, false, true, true, 50, true, true, ReplicationManagerAckPolicy.ALL, 1, true, 0, 9, false ), //17: set requestMin as 0
+ new ConfigOptions(true, true, false, true, true, 50, true, true, ReplicationManagerAckPolicy.ALL, 1, true, 9, 3, false ), //18: set requestMax < requestMin
+ };
+ File homedir;
+ EnvironmentConfig envConfig;
+
+ @BeforeClass public static void ClassInit() {
+ TestUtils.loadConfig(null);
+ homedirName = TestUtils.BASETEST_DBDIR + "/TESTDIR";
+ }
+
+ @AfterClass public static void ClassShutdown() {
+ }
+
+ @Before public void PerTestInit()
+ {
+ TestUtils.removeDir(homedirName);
+ try {
+ homedir = new File(homedirName);
+ homedir.mkdir();
+ } catch (Exception e) {
+ TestUtils.DEBUGOUT(2, "Warning: initialization had a problem creating a clean directory.\n" + e);
+ }
+ try {
+ homedir = new File(homedirName);
+ } catch (NullPointerException npe) {
+ // can't really happen :)
+ }
+ envConfig = new EnvironmentConfig();
+ envConfig.setErrorStream(TestUtils.getErrorStream());
+ envConfig.setErrorPrefix("RepmgrConfigTest test");
+ envConfig.setAllowCreate(true);
+ envConfig.setRunRecovery(true);
+ envConfig.setThreaded(true);
+ envConfig.setInitializeLocking(true);
+ envConfig.setInitializeLogging(true);
+ envConfig.setInitializeCache(true);
+ envConfig.setTransactional(true);
+
+ // Linux seems to have problems cleaning up its sockets,
+ // so start each test at a new port.
+ ++port;
+ }
+
+ @After public void PerTestShutdown()
+ throws Exception {
+ TestUtils.removeDir(homedirName);
+ }
+
+ @Test (timeout=3000) public void TestOptions0()
+ {
+ assertTrue(runTestWithOptions(optionVariations[0]));
+ }
+ @Test (timeout=3000) public void TestOptions1()
+ {
+ assertTrue(runTestWithOptions(optionVariations[1]));
+ }
+ @Test (timeout=3000) public void TestOptions2()
+ {
+ assertTrue(runTestWithOptions(optionVariations[2]));
+ }
+ @Test (timeout=3000) public void TestOptions3()
+ {
+ assertTrue(runTestWithOptions(optionVariations[3]));
+ }
+ @Test (timeout=3000) public void TestOptions4()
+ {
+ assertTrue(runTestWithOptions(optionVariations[4]));
+ }
+ @Test (timeout=3000) public void TestOptions5()
+ {
+ assertTrue(runTestWithOptions(optionVariations[5]));
+ }
+ @Test (timeout=3000) public void TestOptions6()
+ {
+ assertTrue(runTestWithOptions(optionVariations[6]));
+ }
+ @Ignore("Currently failing") @Test (timeout=3000) public void TestOptions7()
+ {
+ assertTrue(runTestWithOptions(optionVariations[7]));
+ }
+ @Test (timeout=3000) public void TestOptions8()
+ {
+ assertTrue(runTestWithOptions(optionVariations[8]));
+ }
+ @Test (timeout=3000) public void TestOptions9()
+ {
+ assertTrue(runTestWithOptions(optionVariations[9]));
+ }
+ @Test (timeout=3000) public void TestOptions10()
+ {
+ assertTrue(runTestWithOptions(optionVariations[10]));
+ }
+ @Test (timeout=3000) public void TestOptions11()
+ {
+ assertTrue(runTestWithOptions(optionVariations[11]));
+ }
+ @Test (timeout=3000) public void TestOptions12()
+ {
+ assertTrue(runTestWithOptions(optionVariations[12]));
+ }
+ @Test (timeout=3000) public void TestOptions13()
+ {
+ assertTrue(runTestWithOptions(optionVariations[13]));
+ }
+ @Test (timeout=3000) public void TestOptions14()
+ {
+ assertTrue(runTestWithOptions(optionVariations[14]));
+ }
+ @Test (timeout=3000) public void TestOptions15()
+ {
+ assertTrue(runTestWithOptions(optionVariations[15]));
+ }
+ @Test (timeout=3000) public void TestOptions16()
+ {
+ assertTrue(runTestWithOptions(optionVariations[16]));
+ }
+ @Test (timeout=3000) public void TestOptions17()
+ {
+ assertTrue(runTestWithOptions(optionVariations[17]));
+ }
+ @Test (timeout=3000) public void TestOptions18()
+ {
+ assertTrue(runTestWithOptions(optionVariations[18]));
+ }
+
+ // Returns true if the outcome (success or exception) matches the expectation recorded in opts.validPolicy.
+ boolean runTestWithOptions(ConfigOptions opts)
+ {
+ boolean retval = true;
+ boolean gotexcept = false;
+ Environment dbenv = null;
+ try {
+
+ envConfig.setTxnNoSync(opts.txnSync);
+ if (opts.initializeReplication)
+ envConfig.setInitializeReplication(true);
+ if (opts.verboseReplication)
+ envConfig.setVerboseReplication(false);
+
+ if (opts.setLocalSiteEnable) {
+ ReplicationHostAddress haddr = new ReplicationHostAddress(address, port);
+ envConfig.setReplicationManagerLocalSite(haddr);
+ }
+ if (opts.setReplicationPriority)
+ envConfig.setReplicationPriority(opts.replicationPriority);
+ if (opts.setValidEventHandler)
+ envConfig.setEventHandler(this);
+
+ if (opts.setAckPolicy)
+ envConfig.setReplicationManagerAckPolicy(opts.ackPolicyToSet);
+
+ envConfig.setReplicationRequestMin(opts.requestMin);
+ envConfig.setReplicationRequestMax(opts.requestMax);
+
+ try {
+ dbenv = new Environment(homedir, envConfig);
+ } catch(FileNotFoundException e) {
+ TestUtils.DEBUGOUT(3, "Unexpected FNFE in standard environment creation." + e);
+ gotexcept = true;
+ retval = false; // never expect a FNFE
+ } catch(DatabaseException dbe) {
+ gotexcept = true;
+ if (opts.validPolicy)
+ TestUtils.DEBUGOUT(3, "Unexpected DB exception from Environment create." + dbe);
+ } catch(IllegalArgumentException iae){
+ gotexcept = true;
+ if (opts.validPolicy)
+ TestUtils.DEBUGOUT(3, "Unexpected DB exception from setRepRequest." + iae);
+ }
+
+ if (!gotexcept) {
+ try {
+ // start replication manager
+ if (opts.validStartPolicy)
+ dbenv.replicationManagerStart(opts.nsitesToStart, ReplicationManagerStartPolicy.REP_MASTER);
+ else
+ dbenv.replicationManagerStart(opts.nsitesToStart, null);
+ } catch(DatabaseException dbe) {
+ gotexcept = true;
+ if (opts.validPolicy)
+ TestUtils.DEBUGOUT(3, "Unexpected database exception came from replicationManagerStart." + dbe);
+ } catch (IllegalArgumentException iae) {
+ gotexcept = true;
+ if (opts.validPolicy)
+ TestUtils.DEBUGOUT(3, "Unexpected IllegalArgumentException came from replicationManagerStart." + iae);
+ } catch (NullPointerException npe) {
+ gotexcept = true;
+ if (opts.validPolicy)
+ TestUtils.DEBUGOUT(3, "Unexpected NullPointerException came from replicationManagerStart." + npe);
+ } catch (AssertionError ae) {
+ gotexcept = true;
+ if (opts.validPolicy)
+ TestUtils.DEBUGOUT(3, "Unexpected AssertionError came from replicationManagerStart." + ae);
+ }
+
+ }
+ } catch (IllegalArgumentException iae) {
+ gotexcept = true;
+ if (opts.validPolicy)
+ TestUtils.DEBUGOUT(3, "Unexpected IllegalArgumentException." + iae);
+ } catch (AssertionError ae) {
+ gotexcept = true;
+ if (opts.validPolicy)
+ TestUtils.DEBUGOUT(3, "Unexpected AssertionError." + ae);
+ } catch (NullPointerException npe) {
+ gotexcept = true;
+ if (opts.validPolicy)
+ TestUtils.DEBUGOUT(3, "Unexpected NullPointerException." + npe);
+ }
+ if (dbenv != null) {
+ try {
+ java.lang.Thread.sleep(1000);
+ }catch(InterruptedException ie) {}
+ try {
+ dbenv.close();
+ Environment.remove(homedir, true, envConfig);
+ } catch(FileNotFoundException fnfe) {
+ gotexcept = true;
+ retval = false;
+ } catch(DatabaseException dbe) {
+ TestUtils.DEBUGOUT(3, "Unexpected database exception came during shutdown." + dbe);
+ gotexcept = true; // never expect a shutdown failure.
+ }
+ }
+ if (retval) {
+ if (gotexcept == opts.validPolicy)
+ retval = false;
+ }
+ return retval;
+ }
+
+ /*
+ * TODO: Maybe move this into a general TestEventHandler?
+ */
+ public void handleRepMasterEvent() {
+ TestUtils.DEBUGOUT(1, "Got a REP_MASTER message");
+ }
+
+ public void handleRepClientEvent() {
+ TestUtils.DEBUGOUT(1, "Got a REP_CLIENT message");
+ }
+
+ public void handleRepNewMasterEvent() {
+ TestUtils.DEBUGOUT(1, "Got a REP_NEW_MASTER message");
+ }
+}
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/RepmgrElectionTest.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/RepmgrElectionTest.java
new file mode 100644
index 0000000..3f48fd6
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/RepmgrElectionTest.java
@@ -0,0 +1,205 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ */
+
+package com.sleepycat.db.test;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import junit.framework.JUnit4TestAdapter;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.util.Vector;
+
+import com.sleepycat.db.*;
+
+public class RepmgrElectionTest extends EventHandlerAdapter implements Runnable
+{
+ static String address = "localhost";
+ static int basePort = 4242;
+ static String baseDirName = "";
+ File homedir;
+ EnvironmentConfig envConfig;
+ Environment dbenv;
+
+ @BeforeClass public static void ClassInit() {
+ TestUtils.loadConfig(null);
+ baseDirName = TestUtils.BASETEST_DBDIR + "/TESTDIR";
+ }
+
+ @AfterClass public static void ClassShutdown() {
+ }
+
+ @Before public void PerTestInit()
+ throws Exception {
+ }
+
+ @After public void PerTestShutdown()
+ throws Exception {
+ for(int j = 0; j < NUM_WORKER_THREADS; j++)
+ {
+ String homedirName = baseDirName+j;
+ TestUtils.removeDir(homedirName);
+ }
+ }
+
+ private static boolean lastSiteStarted = false;
+ private static int NUM_WORKER_THREADS = 5;
+ @Test(timeout=180000) public void startConductor()
+ {
+ Vector<RepmgrElectionTest> workers = new Vector<RepmgrElectionTest>(NUM_WORKER_THREADS);
+ // start the worker threads
+ for (int i = 0; i < NUM_WORKER_THREADS; i++) {
+ RepmgrElectionTest worker = new RepmgrElectionTest(i);
+ worker.run();
+ workers.add(worker);
+ /*
+ while (!lastSiteStarted) {
+ try {
+ java.lang.Thread.sleep(10);
+ }catch(InterruptedException e){}
+ }
+ lastSiteStarted = false;
+ */
+ }
+
+ // TODO: stop the master and ensure the client with the highest priority is elected.
+
+ // TODO: re-start the original master, call an election, and ensure the correct client is elected.
+ }
+
+ /*
+ * Worker thread implementation
+ */
+ private final static int priorities[] = {100, 75, 50, 50, 25};
+ private int threadNumber;
+ public RepmgrElectionTest() {
+ // needed to comply with JUnit, since there is also another constructor.
+ }
+ RepmgrElectionTest(int threadNumber) {
+ this.threadNumber = threadNumber;
+ }
+
+ public void run() {
+ EnvironmentConfig envConfig;
+ Environment dbenv = null;
+ TestUtils.DEBUGOUT(1, "Creating worker: " + threadNumber);
+ try {
+ File homedir = new File(baseDirName + threadNumber);
+
+ if (homedir.exists()) {
+ // The following will fail if the directory contains sub-dirs.
+ if (homedir.isDirectory()) {
+ File[] contents = homedir.listFiles();
+ for (int i = 0; i < contents.length; i++)
+ contents[i].delete();
+ }
+ homedir.delete();
+ }
+ homedir.mkdir();
+ } catch (Exception e) {
+ TestUtils.DEBUGOUT(2, "Warning: initialization had a problem creating a clean directory.\n"+e);
+ }
+ try {
+ homedir = new File(baseDirName+threadNumber);
+ } catch (NullPointerException npe) {
+ // can't really happen :)
+ }
+ envConfig = new EnvironmentConfig();
+ envConfig.setErrorStream(TestUtils.getErrorStream());
+ envConfig.setErrorPrefix("RepmgrElectionTest test("+threadNumber+")");
+ envConfig.setAllowCreate(true);
+ envConfig.setRunRecovery(true);
+ envConfig.setThreaded(true);
+ envConfig.setInitializeLocking(true);
+ envConfig.setInitializeLogging(true);
+ envConfig.setInitializeCache(true);
+ envConfig.setTransactional(true);
+ envConfig.setTxnNoSync(true);
+ envConfig.setInitializeReplication(true);
+ envConfig.setVerboseReplication(false);
+
+ ReplicationHostAddress haddr = new ReplicationHostAddress(address, basePort+threadNumber);
+ envConfig.setReplicationManagerLocalSite(haddr);
+ envConfig.setReplicationPriority(priorities[threadNumber]);
+ envConfig.setEventHandler(this);
+ envConfig.setReplicationManagerAckPolicy(ReplicationManagerAckPolicy.ALL);
+
+
+ try {
+ dbenv = new Environment(homedir, envConfig);
+
+ } catch(FileNotFoundException e) {
+ fail("Unexpected FNFE in standard environment creation." + e);
+ } catch(DatabaseException dbe) {
+ fail("Unexpected database exception came from environment create." + dbe);
+ }
+
+ try {
+ /*
+ * If all threads are started with the REP_ELECTION flag,
+ * the whole system freezes and the following message is
+ * repeated every minute:
+ * RepmgrElectionTest test(0): Waiting for handle count (1) or msg_th (0) to complete replication lockout
+ */
+ envConfig = dbenv.getConfig();
+ for(int existingSites = 0; existingSites < threadNumber; existingSites++)
+ {
+ /*
+ * This causes warnings to be produced - it seems only
+ * able to make a connection to the master site, not other
+ * client sites.
+ * The documentation and code lead me to believe this is not
+ * as expected - so leaving in here for now.
+ */
+ ReplicationHostAddress host = new ReplicationHostAddress(
+ address, basePort+existingSites);
+ envConfig.replicationManagerAddRemoteSite(host, false);
+ }
+ dbenv.setConfig(envConfig);
+ if(threadNumber == 0)
+ dbenv.replicationManagerStart(NUM_WORKER_THREADS, ReplicationManagerStartPolicy.REP_MASTER);
+ else
+ dbenv.replicationManagerStart(NUM_WORKER_THREADS, ReplicationManagerStartPolicy.REP_CLIENT);
+ } catch(DatabaseException dbe) {
+ fail("Unexpected database exception came from replicationManagerStart." + dbe);
+ }
+ TestUtils.DEBUGOUT(1, "Started replication site: " + threadNumber);
+ lastSiteStarted = true;
+ try {
+ java.lang.Thread.sleep(10000);
+ }catch(InterruptedException ie) {}
+ try {
+ dbenv.close();
+ Environment.remove(homedir, false, envConfig);
+ } catch(FileNotFoundException fnfe) {
+ } catch(DatabaseException dbe) {
+ fail("Unexpected database exception came during shutdown." + dbe);
+ }
+ }
+
+ /*
+ * End worker thread implementation
+ */
+ public void handleRepMasterEvent() {
+ TestUtils.DEBUGOUT(1, "Got a REP_MASTER message");
+ TestUtils.DEBUGOUT(1, "My priority: " + priorities[threadNumber]);
+ }
+
+ public void handleRepClientEvent() {
+ TestUtils.DEBUGOUT(1, "Got a REP_CLIENT message");
+ }
+
+ public void handleRepNewMasterEvent() {
+ TestUtils.DEBUGOUT(1, "Got a REP_NEW_MASTER message");
+ TestUtils.DEBUGOUT(1, "My priority: " + priorities[threadNumber]);
+ }
+}
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/RepmgrStartupTest.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/RepmgrStartupTest.java
new file mode 100644
index 0000000..e02fdb7
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/RepmgrStartupTest.java
@@ -0,0 +1,216 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ */
+
+/*
+ * A test case that brings up the replication
+ * manager infrastructure as master, then shuts
+ * the master down cleanly.
+ * This case does not have any replication clients
+ * or even update the underlying DB.
+ */
+
+package com.sleepycat.db.test;
+
+import com.sleepycat.db.test.TestUtils;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import junit.framework.JUnit4TestAdapter;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
+import com.sleepycat.db.*;
+
+public class RepmgrStartupTest extends EventHandlerAdapter
+{
+ static String address = "localhost";
+ static int port = 4242;
+ static int priority = 100;
+ static String homedirName = "TESTDIR";
+ File homedir;
+ EnvironmentConfig envConfig;
+ Environment dbenv;
+
+ @BeforeClass public static void ClassInit() {
+ TestUtils.loadConfig(null);
+ }
+
+ @AfterClass public static void ClassShutdown() {
+ }
+
+ @Before public void PerTestInit()
+ {
+ TestUtils.removeDir(homedirName);
+ try {
+ homedir = new File(homedirName);
+ homedir.mkdir();
+ } catch (Exception e) {
+ TestUtils.DEBUGOUT(2, "Warning: initialization had a problem creating a clean directory.\n" + e);
+ }
+ try {
+ homedir = new File(homedirName);
+ } catch (NullPointerException npe) {
+ // can't really happen :)
+ }
+ envConfig = new EnvironmentConfig();
+ envConfig.setErrorStream(TestUtils.getErrorStream());
+ envConfig.setErrorPrefix("RepmgrStartupTest test");
+ envConfig.setAllowCreate(true);
+ envConfig.setRunRecovery(true);
+ envConfig.setThreaded(true);
+ envConfig.setInitializeLocking(true);
+ envConfig.setInitializeLogging(true);
+ envConfig.setInitializeCache(true);
+ envConfig.setTransactional(true);
+ envConfig.setTxnNoSync(true);
+ envConfig.setInitializeReplication(true);
+ envConfig.setVerboseReplication(false);
+
+ ReplicationHostAddress haddr = new ReplicationHostAddress(address, port);
+ envConfig.setReplicationManagerLocalSite(haddr);
+ envConfig.setReplicationPriority(priority);
+ envConfig.setEventHandler(this);
+ envConfig.setReplicationManagerAckPolicy(ReplicationManagerAckPolicy.ALL);
+
+ try {
+ dbenv = new Environment(homedir, envConfig);
+ } catch(FileNotFoundException e) {
+ fail("Unexpected FNFE in standard environment creation." + e);
+ } catch(DatabaseException dbe) {
+ fail("Unexpected database exception came from environment create." + dbe);
+ }
+ }
+
+ @After public void PerTestShutdown()
+ throws Exception {
+ try {
+ File homedir = new File(homedirName);
+
+ if (homedir.exists()) {
+ // The following will fail if the directory contains sub-dirs.
+ if (homedir.isDirectory()) {
+ File[] contents = homedir.listFiles();
+ for (int i = 0; i < contents.length; i++)
+ contents[i].delete();
+ }
+ homedir.delete();
+ }
+ } catch (Exception e) {
+ TestUtils.DEBUGOUT(2, "Warning: shutdown had a problem cleaning up test directory.\n" + e);
+ }
+ }
+
+
+ @Test (timeout=3000) public void startMaster()
+ {
+ try {
+ // start replication manager
+ dbenv.replicationManagerStart(3, ReplicationManagerStartPolicy.REP_MASTER);
+ } catch(DatabaseException dbe) {
+ fail("Unexpected database exception came from replicationManagerStart." + dbe);
+ }
+ try {
+ java.lang.Thread.sleep(1000);
+ }catch(InterruptedException ie) {}
+
+ try {
+ dbenv.close();
+ Environment.remove(homedir, false, envConfig);
+ } catch(FileNotFoundException fnfe) {
+ } catch(DatabaseException dbe) {
+ fail("Unexpected database exception came during shutdown." + dbe);
+ }
+ }
+
+ @Test (timeout=3000) public void startClient()
+ {
+ try {
+ // start replication manager
+ dbenv.replicationManagerStart(3, ReplicationManagerStartPolicy.REP_CLIENT);
+ } catch(DatabaseException dbe) {
+ fail("Unexpected database exception came from replicationManagerStart." + dbe);
+ }
+ try {
+ java.lang.Thread.sleep(1000);
+ }catch(InterruptedException ie) {}
+
+ try {
+ dbenv.close();
+ Environment.remove(homedir, false, envConfig);
+ } catch(FileNotFoundException fnfe) {
+ } catch(DatabaseException dbe) {
+ fail("Unexpected database exception came during shutdown." + dbe);
+ }
+ }
+
+ @Test (timeout=3000) public void startElection()
+ {
+ try {
+ // start replication manager
+ dbenv.replicationManagerStart(3, ReplicationManagerStartPolicy.REP_ELECTION);
+ } catch(DatabaseException dbe) {
+ fail("Unexpected database exception came from replicationManagerStart." + dbe);
+ }
+ try {
+ java.lang.Thread.sleep(1000);
+ }catch(InterruptedException ie) {}
+
+ try {
+ dbenv.close();
+ Environment.remove(homedir, false, envConfig);
+ } catch(FileNotFoundException fnfe) {
+ } catch(DatabaseException dbe) {
+ fail("Unexpected database exception came during shutdown." + dbe);
+ }
+ }
+
+ @Test (timeout=15000) public void startMasterWaitBeforeShutdown()
+ {
+ try {
+ // start replication manager
+ dbenv.replicationManagerStart(3, ReplicationManagerStartPolicy.REP_MASTER);
+ } catch(DatabaseException dbe) {
+ fail("Unexpected database exception came from replicationManagerStart." + dbe.toString());
+ }
+ try {
+ /*
+ * NOTE! This is a bit alarming - I have seen shutdown failures with the following message:
+ *
+ * RepmgrStartupTest test: Waiting for handle count (1) or msg_th (0) to complete replication lockout
+ *
+ * When the sleep is over 10 seconds.
+ */
+ java.lang.Thread.sleep(12000);
+ }catch(InterruptedException ie) {}
+
+ try {
+ dbenv.close();
+ Environment.remove(homedir, false, envConfig);
+ } catch(FileNotFoundException fnfe) {
+ } catch(DatabaseException dbe) {
+ fail("Unexpected database exception came during shutdown." + dbe.toString());
+ }
+ }
+
+ public void handleRepMasterEvent() {
+ TestUtils.DEBUGOUT(1, "Got a REP_MASTER message");
+ }
+
+ public void handleRepClientEvent() {
+ TestUtils.DEBUGOUT(1, "Got a REP_CLIENT message");
+ }
+
+ public void handleRepNewMasterEvent() {
+ TestUtils.DEBUGOUT(1, "Got a REP_NEW_MASTER message");
+ }
+}
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/TestUtils.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/TestUtils.java
new file mode 100644
index 0000000..140371a
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/TestUtils.java
@@ -0,0 +1,234 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ */
+
+/*
+ * Generally useful functions :)
+ */
+
+package com.sleepycat.db.test;
+
+import static org.junit.Assert.fail;
+
+import com.sleepycat.db.*;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.FileNotFoundException;
+import java.io.InputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Properties;
+
+public class TestUtils
+{
+ public static boolean config_loaded = false;
+ public static boolean verbose_flag = false;
+ public static int debug_level = 2;
+
+ // Should be initialized by calling loadConfig. Shared between all tests.
+ public static String BASETEST_DBDIR = "";
+ public static File BASETEST_DBFILE = null; // new File(TestUtils.BASETEST_DBDIR);
+
+ public static void ERR(String a)
+ {
+ System.err.println("FAIL: " + a);
+ fail(a);
+ }
+
+ public static void DEBUGOUT(String s)
+ {
+ DEBUGOUT(1, s);
+ }
+
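+ // Prints only when importance is greater than debug_level, so raising
+ // debug_level silences more of the output.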
+ public static void DEBUGOUT(int importance, String s)
+ {
+        if (importance > debug_level)
+            System.out.println("DEBUG: " + s);
+ }
+
+ public static void VERBOSEOUT(String s)
+ {
+ if (verbose_flag)
+ System.out.println(s);
+ }
+
+ public static void sysexit(int code)
+ {
+ System.exit(code);
+ }
+
+ public static void check_file_removed(String name, boolean fatal,
+ boolean force_remove_first)
+ {
+ File f = new File(name);
+ if (force_remove_first) {
+ f.delete();
+ }
+ if (f.exists()) {
+ if (fatal)
+ System.out.print("FAIL: ");
+ DEBUGOUT(1, "File \"" + name + "\" still exists after check_file_removed\n");
+ if (fatal)
+ fail("File \"" + name + "\" still exists after check_file_removed");
+ }
+ }
+
+
+ // remove any existing environment or database
+ public static void removeall(boolean use_db, boolean remove_env, String envpath, String dbname)
+ {
+ {
+ try {
+ if (remove_env)
+ Environment.remove(new File(envpath), true, EnvironmentConfig.DEFAULT);
+ if (use_db)
+ Database.remove(dbname, null, DatabaseConfig.DEFAULT);
+ }
+ catch (DatabaseException dbe) {
+                DEBUGOUT(1, "TestUtils::removeall exception caught: " + dbe);
+ }
+ catch (FileNotFoundException dbe) {
+                DEBUGOUT(1, "TestUtils::removeall exception caught: " + dbe);
+ }
+ }
+ check_file_removed(dbname, false, !use_db);
+ if (remove_env) {
+ for (int i=0; i<8; i++) {
+ String fname = envpath + "/" + "__db." + i;
+ check_file_removed(fname, true, !use_db);
+ }
+
+ // ensure the user knows if there is junk remaining.
+ // clean out spurious log.00X files
+ File dir = new File(envpath);
+ if(dir.isDirectory()) {
+ String[] remainingfiles = dir.list();
+ for(int i = 0; i < remainingfiles.length; i++) {
+ if(remainingfiles[i].startsWith("log") || remainingfiles[i].endsWith("db2") ||
+ remainingfiles[i].endsWith("log") || remainingfiles[i].startsWith("__db")) {
+ DEBUGOUT(1, "TestUtils::removeall removing: " +remainingfiles[i]);
+ check_file_removed(envpath + "/" + remainingfiles[i], false, true);
+ } else {
+ if(remainingfiles[i].indexOf("del") == -1)
+ DEBUGOUT(3, "TestUtils::removeall warning, file: " + remainingfiles[i] + " remains in directory after cleanup.");
+ }
+ }
+ }
+ }
+ }
+
+ public static boolean removeDir(String dirname)
+ {
+ try {
+ File deldir = new File(dirname);
+
+ if (!deldir.exists()) {
+ return true;
+ } else if(!deldir.isDirectory()) {
+ return false;
+ } else {
+ // The following will fail if the directory contains sub-dirs.
+ File[] contents = deldir.listFiles();
+ for (int i = 0; i < contents.length; i++)
+ contents[i].delete();
+ deldir.delete();
+ }
+ } catch (Exception e) {
+ TestUtils.DEBUGOUT(4, "Warning: error encountered removing directory.\n" + e);
+ }
+ return true;
+ }
+
+ static public String shownull(Object o)
+ {
+ if (o == null)
+ return "null";
+ else
+ return "not null";
+ }
+
+ /*
+ * The config file is not currently required.
+ * The only variable that can be set via the
+ * config file is the base directory for the
+ * tests to be run in. The default is "data"
+ * and will be created for the tests.
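+     *
+     * A minimal example of a config file entry (the value shown is only
+     * illustrative):
+     *
+     *   BASETEST_DBDIR="/tmp/dbtest"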
+ */
+ public static void loadConfig(String envfilename)
+ {
+ if(config_loaded)
+ return;
+
+ String configname = envfilename;
+ if(envfilename == null)
+ {
+ String OSStr = java.lang.System.getProperty("os.name");
+ if((OSStr.toLowerCase()).indexOf("windows") != -1)
+ {
+ configname = "config_win32";
+ } else {
+ // assume a nix variant.
+ configname = "config_nix";
+ }
+ }
+ config_loaded = true;
+ try {
+ InputStream in = new FileInputStream(configname);
+ DEBUGOUT(2, "Opened " + configname + " to read configuration.");
+ Properties props = new Properties();
+ props.load(in);
+
+ String var = props.getProperty("BASETEST_DBDIR");
+ if(var != null)
+            { // Property values come wrapped in quotes; strip the surrounding characters.
+ var = var.substring(1);
+ var = var.substring(0, var.length() -2);
+ BASETEST_DBDIR = var;
+ }
+ DEBUGOUT(2, "BASETEST_DBDIR is: " + BASETEST_DBDIR);
+
+ } catch (Exception e) {
+ // expected - the config file is optional.
+            DEBUGOUT(0, "loadConfig -- loading of configuration variables failed. error: " + e);
+ }
+        if (BASETEST_DBDIR.equals(""))
+ BASETEST_DBDIR = "data";
+ BASETEST_DBFILE = new File(BASETEST_DBDIR);
+ if (!BASETEST_DBFILE.exists())
+ BASETEST_DBFILE.mkdirs();
+ }
+
+ public static String getDBFileName(String dbname)
+ {
+ DEBUGOUT(1, "getDBFileName returning: " + BASETEST_DBDIR + "/" + dbname);
+ return BASETEST_DBDIR + "/" + dbname;
+ }
+
+ public static OutputStream getErrorStream()
+ {
+ OutputStream retval = System.err;
+ try {
+ File outfile = new File(BASETEST_DBDIR + "/" + "errstream.log");
+ if(outfile.exists())
+ {
+ outfile.delete();
+ outfile.createNewFile();
+ } else {
+ outfile.createNewFile();
+ }
+ retval = new FileOutputStream(outfile);
+ } catch (FileNotFoundException fnfe) {
+ DEBUGOUT(3, "Unable to open error log file. " + fnfe);
+ } catch (IOException ioe) {
+ DEBUGOUT(3, "Unable to create error log file. " + ioe);
+ }
+ return retval;
+ }
+}
diff --git a/db-4.8.30/test/scr016/src/com/sleepycat/db/test/VerboseConfigTest.java b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/VerboseConfigTest.java
new file mode 100644
index 0000000..8438a40
--- /dev/null
+++ b/db-4.8.30/test/scr016/src/com/sleepycat/db/test/VerboseConfigTest.java
@@ -0,0 +1,91 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ */
+
+package com.sleepycat.db.test;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import com.sleepycat.db.*;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+
+import com.sleepycat.db.test.TestUtils;
+public class VerboseConfigTest {
+ public static final String VERBOSECONFIGTEST_DBNAME = "verboseconfigtest.db";
+ @BeforeClass public static void ClassInit() {
+ TestUtils.loadConfig(null);
+ TestUtils.check_file_removed(TestUtils.getDBFileName(VERBOSECONFIGTEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(VERBOSECONFIGTEST_DBNAME));
+ }
+
+ @AfterClass public static void ClassShutdown() {
+ TestUtils.check_file_removed(TestUtils.getDBFileName(VERBOSECONFIGTEST_DBNAME), true, true);
+ TestUtils.removeall(true, true, TestUtils.BASETEST_DBDIR, TestUtils.getDBFileName(VERBOSECONFIGTEST_DBNAME));
+ }
+
+ @Before public void PerTestInit()
+ throws Exception {
+ }
+
+ @After public void PerTestShutdown()
+ throws Exception {
+ }
+ /*
+ * Test case implementations.
+     * To disable a test, mark it with @Ignore.
+     * To set a timeout (in ms), annotate it like: @Test(timeout=1000)
+     * To indicate an expected exception, annotate it like: @Test(expected=Exception.class)
+ */
+
+ @Test public void test1()
+ throws DatabaseException, FileNotFoundException
+ {
+ EnvironmentConfig envc = new EnvironmentConfig();
+ envc.setAllowCreate(true);
+ envc.setInitializeCache(true);
+ envc.setVerbose(VerboseConfig.DEADLOCK, true);
+ envc.setVerbose(VerboseConfig.FILEOPS, true);
+ envc.setVerbose(VerboseConfig.FILEOPS_ALL, true);
+ envc.setVerbose(VerboseConfig.RECOVERY, true);
+ envc.setVerbose(VerboseConfig.REGISTER, true);
+ envc.setVerbose(VerboseConfig.REPLICATION, true);
+ envc.setVerbose(VerboseConfig.WAITSFOR, true);
+ envc.setMessageStream(new FileOutputStream(new File("messages.txt")));
+ Environment db_env = new Environment(TestUtils.BASETEST_DBFILE, envc);
+
+ new File("messages.txt").delete();
+ }
+
+ /*
+ * Tests for old (now deprecated) API.
+ */
+ @Test public void test2()
+ throws DatabaseException, FileNotFoundException
+ {
+ EnvironmentConfig envc = new EnvironmentConfig();
+ envc.setAllowCreate(true);
+ envc.setInitializeCache(true);
+ envc.setVerboseDeadlock(true);
+ envc.setVerboseRecovery(true);
+ envc.setVerboseRegister(true);
+ envc.setVerboseReplication(true);
+ envc.setVerboseWaitsFor(true);
+ envc.setMessageStream(new FileOutputStream(new File("messages.txt")));
+ Environment db_env = new Environment(TestUtils.BASETEST_DBFILE, envc);
+
+ new File("messages.txt").delete();
+ }
+}
diff --git a/db-4.8.30/test/scr017/O.BH b/db-4.8.30/test/scr017/O.BH
new file mode 100644
index 0000000..cd499d3
--- /dev/null
+++ b/db-4.8.30/test/scr017/O.BH
@@ -0,0 +1,196 @@
+abc_10_efg
+abc_10_efg
+abc_11_efg
+abc_11_efg
+abc_12_efg
+abc_12_efg
+abc_13_efg
+abc_13_efg
+abc_14_efg
+abc_14_efg
+abc_15_efg
+abc_15_efg
+abc_16_efg
+abc_16_efg
+abc_17_efg
+abc_17_efg
+abc_18_efg
+abc_18_efg
+abc_19_efg
+abc_19_efg
+abc_1_efg
+abc_1_efg
+abc_20_efg
+abc_20_efg
+abc_21_efg
+abc_21_efg
+abc_22_efg
+abc_22_efg
+abc_23_efg
+abc_23_efg
+abc_24_efg
+abc_24_efg
+abc_25_efg
+abc_25_efg
+abc_26_efg
+abc_26_efg
+abc_27_efg
+abc_27_efg
+abc_28_efg
+abc_28_efg
+abc_29_efg
+abc_29_efg
+abc_2_efg
+abc_2_efg
+abc_30_efg
+abc_30_efg
+abc_31_efg
+abc_31_efg
+abc_32_efg
+abc_32_efg
+abc_33_efg
+abc_33_efg
+abc_34_efg
+abc_34_efg
+abc_36_efg
+abc_36_efg
+abc_37_efg
+abc_37_efg
+abc_38_efg
+abc_38_efg
+abc_39_efg
+abc_39_efg
+abc_3_efg
+abc_3_efg
+abc_40_efg
+abc_40_efg
+abc_41_efg
+abc_41_efg
+abc_42_efg
+abc_42_efg
+abc_43_efg
+abc_43_efg
+abc_44_efg
+abc_44_efg
+abc_45_efg
+abc_45_efg
+abc_46_efg
+abc_46_efg
+abc_47_efg
+abc_47_efg
+abc_48_efg
+abc_48_efg
+abc_49_efg
+abc_49_efg
+abc_4_efg
+abc_4_efg
+abc_50_efg
+abc_50_efg
+abc_51_efg
+abc_51_efg
+abc_52_efg
+abc_52_efg
+abc_53_efg
+abc_53_efg
+abc_54_efg
+abc_54_efg
+abc_55_efg
+abc_55_efg
+abc_56_efg
+abc_56_efg
+abc_57_efg
+abc_57_efg
+abc_58_efg
+abc_58_efg
+abc_59_efg
+abc_59_efg
+abc_5_efg
+abc_5_efg
+abc_60_efg
+abc_60_efg
+abc_61_efg
+abc_61_efg
+abc_62_efg
+abc_62_efg
+abc_63_efg
+abc_63_efg
+abc_64_efg
+abc_64_efg
+abc_65_efg
+abc_65_efg
+abc_66_efg
+abc_66_efg
+abc_67_efg
+abc_67_efg
+abc_68_efg
+abc_68_efg
+abc_69_efg
+abc_69_efg
+abc_6_efg
+abc_6_efg
+abc_70_efg
+abc_70_efg
+abc_71_efg
+abc_71_efg
+abc_72_efg
+abc_72_efg
+abc_73_efg
+abc_73_efg
+abc_74_efg
+abc_74_efg
+abc_75_efg
+abc_75_efg
+abc_76_efg
+abc_76_efg
+abc_77_efg
+abc_77_efg
+abc_78_efg
+abc_78_efg
+abc_79_efg
+abc_79_efg
+abc_7_efg
+abc_7_efg
+abc_80_efg
+abc_80_efg
+abc_81_efg
+abc_81_efg
+abc_82_efg
+abc_82_efg
+abc_83_efg
+abc_83_efg
+abc_84_efg
+abc_84_efg
+abc_85_efg
+abc_85_efg
+abc_86_efg
+abc_86_efg
+abc_87_efg
+abc_87_efg
+abc_88_efg
+abc_88_efg
+abc_89_efg
+abc_89_efg
+abc_8_efg
+abc_8_efg
+abc_90_efg
+abc_90_efg
+abc_91_efg
+abc_91_efg
+abc_92_efg
+abc_92_efg
+abc_93_efg
+abc_93_efg
+abc_94_efg
+abc_94_efg
+abc_95_efg
+abc_95_efg
+abc_96_efg
+abc_96_efg
+abc_97_efg
+abc_97_efg
+abc_98_efg
+abc_98_efg
+abc_99_efg
+abc_99_efg
+abc_9_efg
+abc_9_efg
diff --git a/db-4.8.30/test/scr017/O.R b/db-4.8.30/test/scr017/O.R
new file mode 100644
index 0000000..d78a047
--- /dev/null
+++ b/db-4.8.30/test/scr017/O.R
@@ -0,0 +1,196 @@
+1
+abc_1_efg
+2
+abc_2_efg
+3
+abc_3_efg
+4
+abc_4_efg
+5
+abc_5_efg
+6
+abc_6_efg
+7
+abc_7_efg
+8
+abc_8_efg
+9
+abc_9_efg
+10
+abc_10_efg
+11
+abc_11_efg
+12
+abc_12_efg
+13
+abc_13_efg
+14
+abc_14_efg
+15
+abc_15_efg
+16
+abc_16_efg
+17
+abc_17_efg
+18
+abc_18_efg
+19
+abc_19_efg
+20
+abc_20_efg
+21
+abc_21_efg
+22
+abc_22_efg
+23
+abc_23_efg
+24
+abc_24_efg
+25
+abc_25_efg
+26
+abc_26_efg
+27
+abc_27_efg
+28
+abc_28_efg
+29
+abc_29_efg
+30
+abc_30_efg
+31
+abc_31_efg
+32
+abc_32_efg
+33
+abc_33_efg
+34
+abc_34_efg
+35
+abc_36_efg
+36
+abc_37_efg
+37
+abc_38_efg
+38
+abc_39_efg
+39
+abc_40_efg
+40
+abc_41_efg
+41
+abc_42_efg
+42
+abc_43_efg
+43
+abc_44_efg
+44
+abc_45_efg
+45
+abc_46_efg
+46
+abc_47_efg
+47
+abc_48_efg
+48
+abc_49_efg
+49
+abc_50_efg
+50
+abc_51_efg
+51
+abc_52_efg
+52
+abc_53_efg
+53
+abc_54_efg
+54
+abc_55_efg
+55
+abc_56_efg
+56
+abc_57_efg
+57
+abc_58_efg
+58
+abc_59_efg
+59
+abc_60_efg
+60
+abc_61_efg
+61
+abc_62_efg
+62
+abc_63_efg
+63
+abc_64_efg
+64
+abc_65_efg
+65
+abc_66_efg
+66
+abc_67_efg
+67
+abc_68_efg
+68
+abc_69_efg
+69
+abc_70_efg
+70
+abc_71_efg
+71
+abc_72_efg
+72
+abc_73_efg
+73
+abc_74_efg
+74
+abc_75_efg
+75
+abc_76_efg
+76
+abc_77_efg
+77
+abc_78_efg
+78
+abc_79_efg
+79
+abc_80_efg
+80
+abc_81_efg
+81
+abc_82_efg
+82
+abc_83_efg
+83
+abc_84_efg
+84
+abc_85_efg
+85
+abc_86_efg
+86
+abc_87_efg
+87
+abc_88_efg
+88
+abc_89_efg
+89
+abc_90_efg
+90
+abc_91_efg
+91
+abc_92_efg
+92
+abc_93_efg
+93
+abc_94_efg
+94
+abc_95_efg
+95
+abc_96_efg
+96
+abc_97_efg
+97
+abc_98_efg
+98
+abc_99_efg
diff --git a/db-4.8.30/test/scr017/chk.db185 b/db-4.8.30/test/scr017/chk.db185
new file mode 100644
index 0000000..d868b69
--- /dev/null
+++ b/db-4.8.30/test/scr017/chk.db185
@@ -0,0 +1,34 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure we can run DB 1.85 code.
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+# Set --enable-compat185 when running configure to get the proper header files.
+# If compiling on a Linux blade server, add -pthread to cc.
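+# For example (a typical invocation from a build directory; the path is only
+# illustrative):
+#	../dist/configure --enable-compat185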
+CINC=-I..
+[ `uname` = "Linux" ] && CINC="$CINC -pthread"
+
+if cc -g -Wall $CINC t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+# Clean up so the next test doesn't get confused.
+rm -rf *
+
+exit 0
diff --git a/db-4.8.30/test/scr017/t.c b/db-4.8.30/test/scr017/t.c
new file mode 100644
index 0000000..b349de0
--- /dev/null
+++ b/db-4.8.30/test/scr017/t.c
@@ -0,0 +1,194 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_185.h"
+
+void err(char *);
+int mycmp(const DBT *, const DBT *);
+void ops(DB *, int);
+
+int
+main()
+{
+ DB *dbp;
+ HASHINFO h_info;
+ BTREEINFO b_info;
+ RECNOINFO r_info;
+
+ printf("\tBtree...\n");
+ memset(&b_info, 0, sizeof(b_info));
+ b_info.flags = R_DUP;
+ b_info.cachesize = 100 * 1024;
+ b_info.psize = 512;
+ b_info.lorder = 4321;
+ b_info.compare = mycmp;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_BTREE, &b_info)) == NULL)
+ err("dbopen: btree");
+ ops(dbp, DB_BTREE);
+
+ printf("\tHash...\n");
+ memset(&h_info, 0, sizeof(h_info));
+ h_info.bsize = 512;
+ h_info.ffactor = 6;
+ h_info.nelem = 1000;
+ h_info.cachesize = 100 * 1024;
+ h_info.lorder = 1234;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_HASH, &h_info)) == NULL)
+ err("dbopen: hash");
+ ops(dbp, DB_HASH);
+
+ printf("\tRecno...\n");
+ memset(&r_info, 0, sizeof(r_info));
+ r_info.flags = R_FIXEDLEN;
+ r_info.cachesize = 100 * 1024;
+ r_info.psize = 1024;
+ r_info.reclen = 37;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_RECNO, &r_info)) == NULL)
+ err("dbopen: recno");
+ ops(dbp, DB_RECNO);
+
+ return (0);
+}
+
+int
+mycmp(a, b)
+ const DBT *a, *b;
+{
+ size_t len;
+ u_int8_t *p1, *p2;
+
+ len = a->size > b->size ? b->size : a->size;
+ for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2)
+ if (*p1 != *p2)
+ return ((long)*p1 - (long)*p2);
+ return ((long)a->size - (long)b->size);
+}
+
+void
+ops(dbp, type)
+ DB *dbp;
+ int type;
+{
+ FILE *outfp;
+ DBT key, data;
+ recno_t recno;
+ int i, ret;
+ char buf[64];
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ for (i = 1; i < 100; ++i) { /* Test DB->put. */
+ sprintf(buf, "abc_%d_efg", i);
+ if (type == DB_RECNO) {
+ recno = i;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = data.data = buf;
+ key.size = data.size = strlen(buf);
+ }
+
+ data.data = buf;
+ data.size = strlen(buf);
+ if (dbp->put(dbp, &key, &data, 0))
+ err("DB->put");
+ }
+
+ if (type == DB_RECNO) { /* Test DB->get. */
+ recno = 97;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ sprintf(buf, "abc_%d_efg", 97);
+ if (dbp->get(dbp, &key, &data, 0) != 0)
+ err("DB->get");
+ if (memcmp(data.data, buf, strlen(buf)))
+ err("DB->get: wrong data returned");
+
+ if (type == DB_RECNO) { /* Test DB->put no-overwrite. */
+ recno = 42;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ sprintf(buf, "abc_%d_efg", 42);
+ if (dbp->put(dbp, &key, &data, R_NOOVERWRITE) == 0)
+ err("DB->put: no-overwrite succeeded");
+
+ if (type == DB_RECNO) { /* Test DB->del. */
+ recno = 35;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ sprintf(buf, "abc_%d_efg", 35);
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ if (dbp->del(dbp, &key, 0))
+ err("DB->del");
+
+ /* Test DB->seq. */
+ if ((outfp = fopen("output", "w")) == NULL)
+ err("fopen: output");
+ while ((ret = dbp->seq(dbp, &key, &data, R_NEXT)) == 0) {
+ if (type == DB_RECNO)
+ fprintf(outfp, "%d\n", *(int *)key.data);
+ else
+ fprintf(outfp,
+ "%.*s\n", (int)key.size, (char *)key.data);
+ fprintf(outfp, "%.*s\n", (int)data.size, (char *)data.data);
+ }
+ if (ret != 1)
+ err("DB->seq");
+ fclose(outfp);
+ switch (type) {
+ case DB_BTREE:
+ ret = system("cmp output O.BH");
+ break;
+ case DB_HASH:
+ ret = system("sort output | cmp - O.BH");
+ break;
+ case DB_RECNO:
+ ret = system("cmp output O.R");
+ break;
+ }
+ if (ret != 0)
+ err("output comparison failed");
+
+ if (dbp->sync(dbp, 0)) /* Test DB->sync. */
+ err("DB->sync");
+
+ if (dbp->close(dbp)) /* Test DB->close. */
+ err("DB->close");
+}
+
+void
+err(s)
+ char *s;
+{
+ fprintf(stderr, "\t%s: %s\n", s, strerror(errno));
+ exit(EXIT_FAILURE);
+}
diff --git a/db-4.8.30/test/scr018/chk.comma b/db-4.8.30/test/scr018/chk.comma
new file mode 100644
index 0000000..ecfd929
--- /dev/null
+++ b/db-4.8.30/test/scr018/chk.comma
@@ -0,0 +1,30 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Look for trailing commas in declarations. Some compilers can't handle:
+# enum {
+# foo,
+# bar,
+# };
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+if cc -g -Wall -I.. t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t ../../*/*.[ch] ../../*/*.in; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/db-4.8.30/test/scr018/t.c b/db-4.8.30/test/scr018/t.c
new file mode 100644
index 0000000..3defd58
--- /dev/null
+++ b/db-4.8.30/test/scr018/t.c
@@ -0,0 +1,53 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ */
+
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int
+chk(f)
+ char *f;
+{
+ int ch, l, r;
+
+ if (freopen(f, "r", stdin) == NULL) {
+ fprintf(stderr, "%s: %s\n", f, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ for (l = 1, r = 0; (ch = getchar()) != EOF;) {
+ if (ch != ',')
+ goto next;
+ do { ch = getchar(); } while (isblank(ch));
+ if (ch != '\n')
+ goto next;
+ ++l;
+ do { ch = getchar(); } while (isblank(ch));
+ if (ch != '}')
+ goto next;
+ r = 1;
+ printf("%s: line %d\n", f, l);
+
+next: if (ch == '\n')
+ ++l;
+ }
+ return (r);
+}
+
+int
+main(int argc, char *argv[])
+{
+ int r;
+
+ for (r = 0; *++argv != NULL;)
+ if (chk(*argv))
+ r = 1;
+ return (r);
+}
diff --git a/db-4.8.30/test/scr019/chk.include b/db-4.8.30/test/scr019/chk.include
new file mode 100644
index 0000000..5945e14
--- /dev/null
+++ b/db-4.8.30/test/scr019/chk.include
@@ -0,0 +1,49 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check for inclusion of files already included in db_int.h.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+egrep -- '#include[ ]' $d/dbinc/db_int.in |
+sed -e '/[ ]db\.h'/d \
+ -e 's/^#include.//' \
+ -e 's/[<>"]//g' \
+ -e 's/[ ].*//' > $t1
+
+for i in `cat $t1`; do
+ (cd $d && egrep "^#include[ ].*[<\"]$i[>\"]" */*.[ch])
+done |
+sed -e '/^build/d' \
+ -e '/^db_codegen\/code_capi.c/d' \
+ -e '/^db_dump185/d' \
+ -e '/^dbinc\/mutex_int.h.*<sys\/mman.h>/d' \
+ -e '/^dbinc\/win_db.h:/d' \
+ -e '/^examples_c/d' \
+ -e '/^libdb_java.*errno.h/d' \
+ -e '/^libdb_java.*java_util.h/d' \
+ -e '/^libdb_java\/db_java_wrap.c/d' \
+ -e '/^mod_db4/d' \
+ -e '/^mutex\/tm.c/d' \
+ -e '/^os\/os_map.c.*<sys\/mman.h>/d' \
+ -e '/^perl/d' \
+ -e '/^php_db4/d' \
+ -e '/^test_/d' \
+ > $t2
+
+[ -s $t2 ] && {
+ echo 'FAIL: found extraneous includes in the source'
+ cat $t2
+ exit 1
+}
+exit 0
diff --git a/db-4.8.30/test/scr020/chk.inc b/db-4.8.30/test/scr020/chk.inc
new file mode 100644
index 0000000..d73e898
--- /dev/null
+++ b/db-4.8.30/test/scr020/chk.inc
@@ -0,0 +1,45 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check for inclusion of db_config.h after "const" or other includes.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+(cd $d && find . -name '*.[chys]' -o -name '*.cpp' |
+ xargs egrep -l '#include.*db_config.h') | tee /tmp/o |
+ sed -e '/dbdemo.c$/d' \
+ -e '/db_java_wrap.c$/d' \
+ -e '/ex_apprec.c$/d' > $t1
+
+(for i in `cat $t1`; do
+ egrep -w 'db_config.h|const' /dev/null $d/$i | head -1
+done) > $t2
+
+if egrep const $t2 > /dev/null; then
+ echo 'FAIL: found const before include of db_config.h'
+ egrep const $t2
+ exit 1
+fi
+
+:> $t2
+for i in `cat $t1`; do
+ egrep -w '#include' /dev/null $d/$i | head -1 >> $t2
+done
+
+if egrep -v db_config.h $t2 > /dev/null; then
+ echo 'FAIL: found includes before include of db_config.h'
+ egrep -v db_config.h $t2
+ exit 1
+fi
+
+exit 0
diff --git a/db-4.8.30/test/scr021/chk.flags b/db-4.8.30/test/scr021/chk.flags
new file mode 100644
index 0000000..89f669f
--- /dev/null
+++ b/db-4.8.30/test/scr021/chk.flags
@@ -0,0 +1,174 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check flag name-spaces.
+
+d=../..
+t1=__1
+t2=__2
+
+if cc -g -Wall -I.. t.c -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t $d/*/*.[ch] $d/*/*.in > $t1; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+echo 'Checking "dbenv" variables with flags other than DB_ENV_XXX'
+grep 'dbenv,' $t1 |
+sed -e '/DB_ENV_/d' \
+ -e '/env_method.c.*, mapped_flags*)/d' \
+ -e '/env_region.c.*, flags_orig*)/d' \
+ > $t2
+[ -s $t2 ] && {
+ cat $t2
+ exit 1
+}
+
+echo 'Checking DB_ENV_XXX flags with variables other than "dbenv"'
+grep 'DB_ENV_' $t1 |
+sed -e '/dbenv,/d' \
+ -e '/(dbenv),/d' \
+ > $t2
+[ -s $t2 ] && {
+ cat $t2
+ exit 1
+}
+
+echo 'Checking "env" variables with flags other than ENV_XXX'
+grep '[^b]env,' $t1 |
+sed -e '/[^B]ENV_/d' \
+ -e '/env_method.c.*, mapped_flags*)/d' \
+ > $t2
+[ -s $t2 ] && {
+ cat $t2
+ exit 1
+}
+
+echo 'Checking ENV_XXX flags with variables other than "env"'
+grep '[^A-Z_]ENV_' $t1 |
+sed -e '/[^b]env,/d' \
+ -e '/(env),/d' \
+ > $t2
+[ -s $t2 ] && {
+ cat $t2
+ exit 1
+}
+
+echo 'Checking dbenv "verbose" field with flags other than DB_VERB_XXX'
+grep -- 'dbenv->verbose,' $t1 |
+sed -e '/DB_VERB_/d' \
+ -e '/env_method.c.*, which)/d' \
+ > $t2
+[ -s $t2 ] && {
+ cat $t2
+ exit 1
+}
+
+echo 'Checking DB_VERB_XXX flags with variables other than the dbenv "verbose" field'
+grep -- 'DB_VERB_' $t1 |
+sed -e '/dbenv->verbose,/d' \
+ > $t2
+[ -s $t2 ] && {
+ cat $t2
+ exit 1
+}
+
+echo 'Checking "db" variables with flags other than DB_AM_XXX'
+cp $t1 /tmp/_f
+grep 'dbp,' $t1 |
+sed -e '/DB_AM_/d' \
+ -e '/dbp, mapped_flag)/d' \
+ > $t2
+[ -s $t2 ] && {
+ cat $t2
+ exit 1
+}
+
+echo 'Checking DB_AM_XXX flags with variables other than "db"'
+grep 'DB_AM_' $t1 |
+sed \
+ -e '/(&db,/d' \
+ -e '/(db,/d' \
+ -e '/_method.c:.*outflagsp,/d' \
+ -e '/partition.c:.*pdbp,/d' \
+ -e '/rep_backup.c:.*->flags,/d' \
+	-e '/rep_backup.c:.*->db_flags,/d' \
+ -e '/db.c:.*save_flags,/d' \
+ -e '/((*[ ]*db_rep->rep_db)*,/d' \
+ -e '/((*[ ]*dbc)*->dbp,/d' \
+ -e '/((*[ ]*dbc_arg->dbp)*,/d' \
+ -e '/((*[ ]*dbp)*,/d' \
+ -e '/((*[ ]*dbp)*->s_primary,/d' \
+ -e '/((D),/d' \
+ -e '/((sdbp),/d' \
+ -e '/(fdbp,/d' \
+ -e '/(file_dbp,/d' \
+ -e '/(ldbp,/d' \
+ -e '/(msgfp->db_flags,/d' \
+ -e '/(mdbp,/d' \
+ -e '/(pdbp,/d' \
+ -e '/(pginfo, /d' \
+ -e '/(rfp->db_flags,/d' \
+ -e '/(sdbp,/d' \
+ -e '/(subdbp,/d' \
+ -e '/fop_util.c:.*(t2dbp,/d' \
+ -e '/fop_util.c:.*(tmpdbp,/d' \
+ > $t2
+[ -s $t2 ] && {
+ cat $t2
+ exit 1
+}
+
+echo 'Checking "dbc" variables flags with flags other than DBC_XXX'
+echo Checking DBC flags...
+cat $t1 |
+grep 'dbc,' |
+sed -e '/DBC_/d' \
+ -e '/db_cam.c:.*tmp_read_locking)/d'
+ > $t2
+[ -s $t2 ] && {
+ cat $t2
+ exit 1
+}
+
+echo 'Checking DBC_XXX flags with variables other than "dbc"'
+grep 'DBC_' $t1 |
+sed -e '/((*dbc)*,/d' \
+ -e '/(dbc_arg,/d' \
+ -e '/(dbc_c,/d' \
+ -e '/(dbc_n,/d' \
+ -e '/(dbc_orig,/d' \
+ -e '/(opd,/d' \
+ -e '/(pdbc,/d' \
+ -e '/[(*]put_state[p]*,/d' \
+ -e '/(sdbc,/d' \
+ -e '/partition.c:.*_dbc,/d' \
+	-e '/partition.c:.*_cursor,/d' \
+	> $t2
+[ -s $t2 ] && {
+ cat $t2
+ exit 1
+}
+
+echo Checking for bad use of macros...
+egrep 'case .*F_SET\(|case .*F_CLR\(' $d/*/*.c > $t1
+egrep 'for .*F_SET\(|for .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'if .*F_SET\(|if .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'switch .*F_SET\(|switch .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'while .*F_SET\(|while .*F_CLR\(' $d/*/*.c >> $t1
+[ -s $t1 ] && {
+	echo 'Flow-control statement (case/for/if/switch/while) followed by a non-test macro'
+ cat $t1
+ exit 1
+}
+
+exit 0
diff --git a/db-4.8.30/test/scr021/t.c b/db-4.8.30/test/scr021/t.c
new file mode 100644
index 0000000..046c2f2
--- /dev/null
+++ b/db-4.8.30/test/scr021/t.c
@@ -0,0 +1,79 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ */
+
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+void
+chk(f, str)
+ char *f, *str;
+{
+ char *s;
+ int ch, l, ok, pc;
+
+ if (freopen(f, "r", stdin) == NULL) {
+ fprintf(stderr, "%s: %s\n", f, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ for (l = 1, ok = 1, s = str; (ch = getchar()) != EOF;) {
+ if (ch == '\n')
+ ++l;
+ if (!ok || ch != *s) {
+ s = str;
+ ok = !isalpha(ch) && !isdigit(ch) && ch != '_';
+ continue;
+ }
+ if (*++s != '\0')
+ continue;
+
+ /* Match. */
+ printf("%s: %d: %s", f, l, str);
+ for (pc = 1; (ch = getchar()) != EOF;) {
+ switch (ch) {
+ case '(':
+ ++pc;
+ break;
+ case ')':
+ --pc;
+ break;
+ case '\n':
+ ++l;
+ break;
+ }
+ if (ch == '\n')
+ putchar(' ');
+ else
+ putchar(ch);
+ if (pc <= 0) {
+ putchar('\n');
+ break;
+ }
+ }
+ s = str;
+ }
+}
+
+int
+main(int argc, char *argv[])
+{
+ int r;
+
+ for (r = 0; *++argv != NULL;) {
+ chk(*argv, "FLD_CLR(");
+ chk(*argv, "FLD_ISSET(");
+ chk(*argv, "FLD_SET(");
+ chk(*argv, "F_CLR(");
+ chk(*argv, "F_ISSET(");
+ chk(*argv, "F_SET(");
+ }
+ return (0);
+}
diff --git a/db-4.8.30/test/scr022/chk.rr b/db-4.8.30/test/scr022/chk.rr
new file mode 100644
index 0000000..a9692d8
--- /dev/null
+++ b/db-4.8.30/test/scr022/chk.rr
@@ -0,0 +1,34 @@
+#!/bin/sh -
+#
+# $Id$
+
+d=../..
+
+t1=__1
+
+# Check for DB_RUNRECOVERY being specified instead of a call to env_panic.
+egrep DB_RUNRECOVERY $d/*/*.c |
+ sed -e '/__env_panic(.*, DB_RUNRECOVERY)/d' \
+ -e '/\/php_db4\//d' \
+ -e '/case DB_RUNRECOVERY:/d' \
+ -e '/db_dispatch.c:.*if (ret == DB_RUNRECOVERY/d' \
+ -e '/db_err.c:/d' \
+ -e '/env_open.c:.*ret = DB_RUNRECOVERY;/d' \
+ -e '/os_errno.c:.*evalue == DB_RUNRECOVERY/d' \
+ -e '/mut_fcntl.c:.*return (DB_RUNRECOVERY);/d' \
+ -e '/mut_pthread.c:.*return (DB_RUNRECOVERY);/d' \
+ -e '/mut_tas.c:.*return (DB_RUNRECOVERY);/d' \
+ -e '/mut_tas.c:.*Possibly DB_RUNRECOVERY if/d' \
+ -e '/mut_win32.c:.*return (DB_RUNRECOVERY);/d' \
+ -e '/mut_win32.c:.*ret = DB_RUNRECOVERY;/d' \
+ -e '/rep_backup.c:.*Panic the env and return DB_RUNRECOVERY/d' \
+ -e '/txn.c:.* \* DB_RUNRECOVERY and we need to/d' \
+ -e '/txn.c:.*returned DB_RUNRECOVERY and we need to/d' \
+ > $t1
+[ -s $t1 ] && {
+ echo "DB_RUNRECOVERY used; should be a call to env_panic."
+ cat $t1
+ exit 1
+}
+
+exit 0
diff --git a/db-4.8.30/test/scr023/chk.q b/db-4.8.30/test/scr023/chk.q
new file mode 100644
index 0000000..91bfa6c
--- /dev/null
+++ b/db-4.8.30/test/scr023/chk.q
@@ -0,0 +1,26 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure the queue macros pass our tests.
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+if cc -g -Wall -I../../dbinc q.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program q.c"
+ exit 1
+fi
+
+if ./t; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/db-4.8.30/test/scr023/q.c b/db-4.8.30/test/scr023/q.c
new file mode 100644
index 0000000..4b4943d
--- /dev/null
+++ b/db-4.8.30/test/scr023/q.c
@@ -0,0 +1,840 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ */
+
+#include <sys/types.h>
+#include <sys/time.h>
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "queue.h"
+#include "shqueue.h"
+
+typedef enum {
+ FORWARD_WALK_FAILED = 1,
+ FOREACH_WALK_FAILED,
+ LIST_END_NOT_MARKED_FAILURE,
+ PREV_WALK_FAILED,
+ REVERSE_FOREACH_WALK_FAILED,
+ EXPECTED_HEAD_FAILED
+} FAILURE_REASON;
+
+const char *failure_reason_names[] = {
+ "",
+ "walking the list using the _NEXT forward failed",
+ "walking the list using the _FOREACH macro failed",
+ "what was expected to be the last element wasn't marked as such",
+ "walking the list using the _PREV macro failed",
+ "walking the list using the _REVERSE_FOREACH macro failed",
+ "expected to be at the head of the list"
+};
+
+SH_LIST_HEAD(sh_lq);
+struct sh_le {
+ char content;
+ SH_LIST_ENTRY sh_les;
+};
+
+/* create a string from the content of a list queue */
+char *
+sh_l_as_string(l)
+ struct sh_lq *l;
+{
+ static char buf[1024];
+ struct sh_le *ele = SH_LIST_FIRST(l, sh_le);
+ int i = 1;
+
+ buf[0] = '"';
+ while (ele != NULL) {
+ buf[i] = ele->content;
+ ele = SH_LIST_NEXT(ele, sh_les, sh_le);
+ if (ele != NULL)
+ buf[++i] = ' ';
+ i++;
+ }
+ buf[i++] = '"';
+ buf[i] = '\0';
+ return buf;
+}
+
+/* init a list queue */
+struct sh_lq *
+sh_l_init(items)
+ const char *items;
+{
+ const char *c = items;
+ struct sh_le *ele = NULL, *last_ele = (struct sh_le*)-1;
+ struct sh_lq *l = calloc(1, sizeof(struct sh_lq));
+
+ SH_LIST_INIT(l);
+
+ while (*c != '\0') {
+ if (c[0] != ' ') {
+ last_ele = ele;
+ ele = calloc(1, sizeof(struct sh_le));
+ ele->content = c[0];
+ if (SH_LIST_EMPTY(l))
+ SH_LIST_INSERT_HEAD(l, ele, sh_les, sh_le);
+ else
+ SH_LIST_INSERT_AFTER(
+ last_ele, ele, sh_les, sh_le);
+ }
+ c++;
+ }
+ return (l);
+}
+
+struct sh_lq *
+sh_l_remove_head(l)
+ struct sh_lq *l;
+{
+ struct sh_le *ele = SH_LIST_FIRST(l, sh_le);
+
+ SH_LIST_REMOVE_HEAD(l, sh_les, sh_le);
+ if (ele != NULL)
+ free(ele);
+
+ return (l);
+}
+
+struct sh_lq *
+sh_l_remove_tail(l)
+ struct sh_lq *l;
+{
+ struct sh_le *ele = SH_LIST_FIRST(l, sh_le);
+
+ if (SH_LIST_EMPTY(l))
+ return (l);
+
+ while (SH_LIST_NEXT(ele, sh_les, sh_le) != NULL)
+ ele = SH_LIST_NEXT(ele, sh_les, sh_le);
+
+ if (ele) {
+ SH_LIST_REMOVE(ele, sh_les, sh_le);
+ free(ele);
+ }
+ return (l);
+}
+
+struct sh_lq *
+sh_l_remove_item(l, item)
+ struct sh_lq *l;
+ const char *item;
+{
+ struct sh_le *ele = SH_LIST_FIRST(l, sh_le);
+
+ while (ele != NULL) {
+ if (ele->content == item[0])
+ break;
+ ele = SH_LIST_NEXT(ele, sh_les, sh_le);
+ }
+ if (ele)
+ SH_LIST_REMOVE(ele, sh_les, sh_le);
+ return (l);
+}
+
+struct sh_lq *
+sh_l_insert_head(l, item)
+ struct sh_lq *l;
+ const char *item;
+{
+ struct sh_le *ele = calloc(1, sizeof(struct sh_le));
+
+ ele->content = item[0];
+ SH_LIST_INSERT_HEAD(l, ele, sh_les, sh_le);
+
+ return (l);
+}
+
+struct sh_lq *
+sh_l_insert_tail(l, item)
+ struct sh_lq *l;
+ const char *item;
+{
+ struct sh_le *ele = NULL;
+ struct sh_le *last_ele = SH_LIST_FIRST(l, sh_le);
+
+ if (last_ele != NULL)
+ while (SH_LIST_NEXT(last_ele, sh_les, sh_le) != NULL)
+ last_ele = SH_LIST_NEXT(last_ele, sh_les, sh_le);
+
+ if (last_ele == NULL) {
+ ele = calloc(1, sizeof(struct sh_le));
+ ele->content = item[0];
+ SH_LIST_INSERT_HEAD(l, ele, sh_les, sh_le);
+ } else {
+ ele = calloc(1, sizeof(struct sh_le));
+ ele->content = item[0];
+ SH_LIST_INSERT_AFTER(last_ele, ele, sh_les, sh_le);
+ }
+
+ return (l);
+}
+
+struct sh_lq *
+sh_l_insert_before(l, item, before_item)
+ struct sh_lq *l;
+ const char *item;
+ const char *before_item;
+{
+ struct sh_le *ele = NULL;
+ struct sh_le *before_ele = SH_LIST_FIRST(l, sh_le);
+
+ while (before_ele != NULL) {
+ if (before_ele->content == before_item[0])
+ break;
+ before_ele = SH_LIST_NEXT(before_ele, sh_les, sh_le);
+ }
+ if (before_ele != NULL) {
+ ele = calloc(1, sizeof(struct sh_le));
+ ele->content = item[0];
+ SH_LIST_INSERT_BEFORE(l, before_ele, ele, sh_les, sh_le);
+ }
+ return (l);
+}
+
+struct sh_lq *
+sh_l_insert_after(l, item, after_item)
+ struct sh_lq *l;
+ const char *item;
+ const char *after_item;
+{
+ struct sh_le *ele = NULL;
+ struct sh_le *after_ele = SH_LIST_FIRST(l, sh_le);
+
+ while (after_ele != NULL) {
+ if (after_ele->content == after_item[0])
+ break;
+ after_ele = SH_LIST_NEXT(after_ele, sh_les, sh_le);
+ }
+ if (after_ele != NULL) {
+ ele = calloc(1, sizeof(struct sh_le));
+ ele->content = item[0];
+ SH_LIST_INSERT_AFTER(after_ele, ele, sh_les, sh_le);
+ }
+ return (l);
+}
+
+void
+sh_l_discard(l)
+ struct sh_lq *l;
+{
+ struct sh_le *ele = NULL;
+
+ while ((ele = SH_LIST_FIRST(l, sh_le)) != NULL) {
+ SH_LIST_REMOVE(ele, sh_les, sh_le);
+ free(ele);
+ }
+
+ free(l);
+}
+
+int
+sh_l_verify(l, items)
+ struct sh_lq *l;
+ const char *items;
+{
+ const char *c = items;
+ struct sh_le *ele = NULL, *lele = NULL;
+ int i = 0, nele = 0;
+
+ while (*c != '\0') {
+ if (c[0] != ' ')
+ nele++;
+ c++;
+ }
+
+ /* use the FOREACH macro to walk the list */
+ c = items;
+ i = 0;
+ SH_LIST_FOREACH(ele, l, sh_les, sh_le) {
+ if (ele->content != c[0])
+ return (FOREACH_WALK_FAILED);
+ i++;
+ c +=2;
+ }
+ if (i != nele)
+ return (FOREACH_WALK_FAILED);
+ i = 0;
+ if (items[0] != '\0') {
+ /* walk the list forward */
+ c = items;
+ ele = SH_LIST_FIRST(l, sh_le);
+ while (*c != '\0') {
+ lele = ele;
+ if (c[0] != ' ') {
+ if (ele->content != c[0])
+ return (FORWARD_WALK_FAILED);
+ i++;
+ ele = SH_LIST_NEXT(ele, sh_les, sh_le);
+ }
+ c++;
+ }
+ ele = lele;
+
+ if (i != nele)
+ return (FOREACH_WALK_FAILED);
+
+ /* ele should be the last element in the list... */
+ /* ... so sle_next should be -1 */
+ if (ele->sh_les.sle_next != -1)
+ return (LIST_END_NOT_MARKED_FAILURE);
+
+ /* and NEXT needs to be NULL */
+ if (SH_LIST_NEXT(ele, sh_les, sh_le) != NULL)
+ return (LIST_END_NOT_MARKED_FAILURE);
+
+ /*
+ * walk the list backwards using PREV macro, first move c
+ * back a bit
+ */
+ c--;
+ i = 0;
+ while (c >= items) {
+ if (c[0] != ' ') {
+ lele = ele;
+ if (ele->content != c[0])
+ return (PREV_WALK_FAILED);
+ ele = SH_LIST_PREV(ele, sh_les, sh_le);
+ i++;
+ }
+ c--;
+ }
+ ele = lele;
+
+ if (i != nele)
+ return (PREV_WALK_FAILED);
+
+ if (ele != SH_LIST_FIRST(l, sh_le))
+ return (EXPECTED_HEAD_FAILED);
+ }
+ return (0);
+}
+
+SH_TAILQ_HEAD(sh_tq);
+struct sh_te {
+ char content;
+ SH_TAILQ_ENTRY sh_tes;
+};
+
+/* create a string from the content of a tail queue */
+char *
+sh_t_as_string(l)
+ struct sh_tq *l;
+{
+ static char buf[1024];
+ struct sh_te *ele = SH_TAILQ_FIRST(l, sh_te);
+ int i = 1;
+
+ buf[0] = '"';
+ while (ele != NULL) {
+ buf[i] = ele->content;
+ ele = SH_TAILQ_NEXT(ele, sh_tes, sh_te);
+ if (ele != NULL)
+ buf[++i] = ' ';
+ i++;
+ }
+ buf[i++] = '"';
+ buf[i] = '\0';
+ return (buf);
+}
+
+/* init a tail queue */
+struct sh_tq *
+sh_t_init(items)
+ const char *items;
+{
+ const char *c = items;
+ struct sh_te *ele = NULL, *last_ele = (struct sh_te*)-1;
+ struct sh_tq *l = calloc(1, sizeof(struct sh_tq));
+
+ SH_TAILQ_INIT(l);
+
+ while (*c != '\0') {
+ if (c[0] != ' ') {
+ ele = calloc(1, sizeof(struct sh_te));
+ ele->content = c[0];
+
+ if (SH_TAILQ_EMPTY(l))
+ SH_TAILQ_INSERT_HEAD(l, ele, sh_tes, sh_te);
+ else
+ SH_TAILQ_INSERT_AFTER(
+ l, last_ele, ele, sh_tes, sh_te);
+ last_ele = ele;
+ }
+ c++;
+ }
+ return (l);
+}
+
+struct sh_tq *
+sh_t_remove_head(l)
+ struct sh_tq *l;
+{
+ struct sh_te *ele = SH_TAILQ_FIRST(l, sh_te);
+
+ if (ele != NULL)
+ SH_TAILQ_REMOVE(l, ele, sh_tes, sh_te);
+
+ free(ele);
+
+ return (l);
+}
+
+struct sh_tq *
+sh_t_remove_tail(l)
+ struct sh_tq *l;
+{
+ struct sh_te *ele = SH_TAILQ_FIRST(l, sh_te);
+
+ if (SH_TAILQ_EMPTY(l))
+ return (l);
+
+ while (SH_TAILQ_NEXT(ele, sh_tes, sh_te) != NULL)
+ ele = SH_TAILQ_NEXT(ele, sh_tes, sh_te);
+
+ if (ele != NULL) {
+ SH_TAILQ_REMOVE(l, ele, sh_tes, sh_te);
+ free(ele);
+ }
+
+ return (l);
+}
+
+struct sh_tq *
+sh_t_remove_item(l, item)
+ struct sh_tq *l;
+ const char *item;
+{
+ struct sh_te *ele = SH_TAILQ_FIRST(l, sh_te);
+
+ while (ele != NULL) {
+ if (ele->content == item[0])
+ break;
+ ele = SH_TAILQ_NEXT(ele, sh_tes, sh_te);
+ }
+ if (ele != NULL)
+ SH_TAILQ_REMOVE(l, ele, sh_tes, sh_te);
+
+ return (l);
+}
+
+struct sh_tq *
+sh_t_insert_head(l, item)
+ struct sh_tq *l;
+ const char *item;
+{
+ struct sh_te *ele = calloc(1, sizeof(struct sh_te));
+
+ ele->content = item[0];
+ SH_TAILQ_INSERT_HEAD(l, ele, sh_tes, sh_te);
+
+ return (l);
+}
+
+struct sh_tq *
+sh_t_insert_tail(l, item)
+ struct sh_tq *l;
+ const char *item;
+{
+ struct sh_te *ele = 0;
+ ele = calloc(1, sizeof(struct sh_te));
+ ele->content = item[0];
+ SH_TAILQ_INSERT_TAIL(l, ele, sh_tes);
+ return l;
+}
+
+struct sh_tq *
+sh_t_insert_before(l, item, before_item)
+ struct sh_tq *l;
+ const char *item;
+ const char *before_item;
+{
+ struct sh_te *ele = NULL;
+ struct sh_te *before_ele = SH_TAILQ_FIRST(l, sh_te);
+
+ while (before_ele != NULL) {
+ if (before_ele->content == before_item[0])
+ break;
+ before_ele = SH_TAILQ_NEXT(before_ele, sh_tes, sh_te);
+ }
+
+ if (before_ele != NULL) {
+ ele = calloc(1, sizeof(struct sh_te));
+ ele->content = item[0];
+ SH_TAILQ_INSERT_BEFORE(l, before_ele, ele, sh_tes, sh_te);
+ }
+
+ return (l);
+}
+
+struct sh_tq *
+sh_t_insert_after(l, item, after_item)
+ struct sh_tq *l;
+ const char *item;
+ const char *after_item;
+{
+ struct sh_te *ele = NULL;
+ struct sh_te *after_ele = SH_TAILQ_FIRST(l, sh_te);
+
+ while (after_ele != NULL) {
+ if (after_ele->content == after_item[0])
+ break;
+ after_ele = SH_TAILQ_NEXT(after_ele, sh_tes, sh_te);
+ }
+
+ if (after_ele != NULL) {
+ ele = calloc(1, sizeof(struct sh_te));
+ ele->content = item[0];
+ SH_TAILQ_INSERT_AFTER(l, after_ele, ele, sh_tes, sh_te);
+ }
+
+ return (l);
+}
+
+void
+sh_t_discard(l)
+ struct sh_tq *l;
+{
+ struct sh_te *ele = NULL;
+
+ while ((ele = SH_TAILQ_FIRST(l, sh_te)) != NULL) {
+ SH_TAILQ_REMOVE(l, ele, sh_tes, sh_te);
+ free(ele);
+ }
+ free(l);
+}
+
+int
+sh_t_verify(l, items)
+ struct sh_tq *l;
+ const char *items;
+{
+ const char *c = items, *b = NULL;
+ struct sh_te *ele = NULL, *lele = NULL;
+ int i = 0, nele = 0;
+
+ while (*c != '\0') {
+ if (c[0] != ' ')
+ nele++;
+ c++;
+ }
+
+ /* use the FOREACH macro to walk the list */
+ c = items;
+ i = 0;
+ SH_TAILQ_FOREACH(ele, l, sh_tes, sh_te) {
+ if (ele->content != c[0])
+ return (FOREACH_WALK_FAILED);
+ i++;
+ c +=2;
+ }
+ if (i != nele)
+ return (FOREACH_WALK_FAILED);
+ i = 0;
+ if (items[0] != '\0') {
+ /* walk the list forward */
+ c = items;
+ ele = SH_TAILQ_FIRST(l, sh_te);
+ while (*c != '\0') {
+ lele = ele;
+ if (c[0] != ' ') {
+ if (ele->content != c[0])
+ return (FORWARD_WALK_FAILED);
+ i++;
+ ele = SH_TAILQ_NEXT(ele, sh_tes, sh_te);
+ }
+ c++;
+ }
+
+ if (i != nele)
+ return (FOREACH_WALK_FAILED);
+
+ if (lele != SH_TAILQ_LAST(l, sh_tes, sh_te))
+ return (LIST_END_NOT_MARKED_FAILURE);
+ ele = lele;
+
+ /* ele should be the last element in the list... */
+ /* ... so sle_next should be -1 */
+		/* ... so stqe_next should be -1 */
+ return (LIST_END_NOT_MARKED_FAILURE);
+
+ /* and NEXT needs to be NULL */
+ if (SH_TAILQ_NEXT(ele, sh_tes, sh_te) != NULL)
+ return (LIST_END_NOT_MARKED_FAILURE);
+
+		/* walk the list backwards using the SH_TAILQ_PREV macro */
+ c--;
+ b = c;
+ i = 0;
+ while (c >= items) {
+ if (c[0] != ' ') {
+ lele = ele;
+ if (ele->content != c[0])
+ return (PREV_WALK_FAILED);
+ ele = SH_TAILQ_PREV(l, ele, sh_tes, sh_te);
+ i++;
+ }
+ c--;
+ }
+ ele = lele;
+
+ if (i != nele)
+ return (PREV_WALK_FAILED);
+
+ if (ele != SH_TAILQ_FIRST(l, sh_te))
+ return (-1);
+
+ /* c should be the last character in the array, walk backwards
+ from here using FOREACH_REVERSE and check the values again */
+ c = b;
+ i = 0;
+ ele = SH_TAILQ_LAST(l, sh_tes, sh_te);
+ SH_TAILQ_FOREACH_REVERSE(ele, l, sh_tes, sh_te) {
+ if (ele->content != c[0])
+ return (REVERSE_FOREACH_WALK_FAILED);
+ i++;
+ c -=2;
+ }
+ if (i != nele)
+ return (REVERSE_FOREACH_WALK_FAILED);
+ }
+ return (0);
+}
+
+int
+sh_t_verify_TAILQ_LAST(l, items)
+ struct sh_tq *l;
+ const char *items;
+{
+ const char *c = items;
+ struct sh_te *ele = NULL;
+
+ c = items;
+ while (*c != '\0') {
+ c++;
+ }
+ if (c == items) {
+ /* items is empty, so last should be NULL */
+ if (SH_TAILQ_LAST(l, sh_tes, sh_te) != NULL)
+ return (-1);
+ } else {
+ c--;
+ ele = SH_TAILQ_LAST(l, sh_tes, sh_te);
+ if (ele->content != c[0])
+ return (-1);
+ }
+ return (0);
+}
+
+typedef void *qds_t;
+struct {
+ const char *name;
+ qds_t *(*f_init)(const char *);
+ qds_t *(*f_remove_head)(qds_t *);
+ qds_t *(*f_remove_tail)(qds_t *);
+ qds_t *(*f_remove_item)(qds_t *, const char *);
+ qds_t *(*f_insert_head)(qds_t *, const char *);
+ qds_t *(*f_insert_tail)(qds_t *, const char *);
+ qds_t *(*f_insert_before)(qds_t *, const char *, const char *);
+ qds_t *(*f_insert_after)(qds_t *, const char *, const char *);
+ qds_t *(*f_discard)(qds_t *);
+ char *(*f_as_string)(qds_t *);
+ int (*f_verify)(qds_t *, const char *);
+} qfns[]= {
+{ "sh_list",
+ (qds_t*(*)(const char *))sh_l_init,
+ (qds_t*(*)(qds_t *))sh_l_remove_head,
+ (qds_t*(*)(qds_t *))sh_l_remove_tail,
+ (qds_t*(*)(qds_t *, const char *))sh_l_remove_item,
+ (qds_t*(*)(qds_t *, const char *))sh_l_insert_head,
+ (qds_t*(*)(qds_t *, const char *))sh_l_insert_tail,
+ (qds_t*(*)(qds_t *, const char *, const char *))sh_l_insert_before,
+ (qds_t*(*)(qds_t *, const char *, const char *))sh_l_insert_after,
+ (qds_t*(*)(qds_t *))sh_l_discard,
+ (char *(*)(qds_t *))sh_l_as_string,
+ (int(*)(qds_t *, const char *))sh_l_verify },
+{ "sh_tailq",
+ (qds_t*(*)(const char *))sh_t_init,
+ (qds_t*(*)(qds_t *))sh_t_remove_head,
+ (qds_t*(*)(qds_t *))sh_t_remove_tail,
+ (qds_t*(*)(qds_t *, const char *))sh_t_remove_item,
+ (qds_t*(*)(qds_t *, const char *))sh_t_insert_head,
+ (qds_t*(*)(qds_t *, const char *))sh_t_insert_tail,
+ (qds_t*(*)(qds_t *, const char *, const char *))sh_t_insert_before,
+ (qds_t*(*)(qds_t *, const char *, const char *))sh_t_insert_after,
+ (qds_t*(*)(qds_t *))sh_t_discard,
+ (char *(*)(qds_t *))sh_t_as_string,
+ (int(*)(qds_t *, const char *))sh_t_verify }
+};
+
+typedef enum {
+ INSERT_BEFORE,
+ INSERT_AFTER,
+ INSERT_HEAD,
+ INSERT_TAIL,
+ REMOVE_HEAD,
+ REMOVE_ITEM,
+ REMOVE_TAIL,
+	REMOVE_TAIL
+
+const char *op_names[] = {
+ "INSERT_BEFORE",
+ "INSERT_AFTER",
+ "INSERT_HEAD",
+ "INSERT_TAIL",
+ "REMOVE_HEAD",
+ "REMOVE_ITEM",
+ "REMOVE_TAIL" };
+
+struct {
+ char *init; /* initial state. */
+ char *final; /* final state. */
+ char *elem; /* element to operate on */
+ char *insert; /* element to insert */
+ OP op; /* operation. */
+} ops[] = {
+
+	/* most operations on an empty list */
+ { "", "", NULL, NULL, REMOVE_HEAD },
+ { "", "", NULL, NULL, REMOVE_TAIL },
+ { "", "A", NULL, "A", INSERT_HEAD },
+ { "", "A", NULL, "A", INSERT_TAIL },
+
+ /* all operations on a one element list */
+ { "A", "", NULL, NULL, REMOVE_HEAD },
+ { "A", "", NULL, NULL, REMOVE_TAIL },
+ { "A", "", "A", NULL, REMOVE_ITEM },
+ { "B", "A B", NULL, "A", INSERT_HEAD },
+ { "A", "A B", NULL, "B", INSERT_TAIL },
+ { "B", "A B", "B", "A", INSERT_BEFORE },
+ { "A", "A B", "A", "B", INSERT_AFTER },
+
+ /* all operations on a two element list */
+ { "A B", "B", NULL, NULL, REMOVE_HEAD },
+ { "A B", "A", NULL, NULL, REMOVE_TAIL },
+ { "A B", "A", "B", NULL, REMOVE_ITEM },
+ { "A B", "B", "A", NULL, REMOVE_ITEM },
+ { "B C", "A B C", NULL, "A", INSERT_HEAD },
+ { "A B", "A B C", NULL, "C", INSERT_TAIL },
+ { "B C", "A B C", "B", "A", INSERT_BEFORE },
+ { "A C", "A B C", "C", "B", INSERT_BEFORE },
+ { "A C", "A B C", "A", "B", INSERT_AFTER },
+ { "A C", "A C B", "C", "B", INSERT_AFTER },
+
+ /* all operations on a three element list */
+
+ { "A B C", "B C", NULL, NULL, REMOVE_HEAD },
+ { "A B C", "A B", NULL, NULL, REMOVE_TAIL },
+ { "A B C", "A B", "C", NULL, REMOVE_ITEM },
+ { "A B C", "A C", "B", NULL, REMOVE_ITEM },
+ { "A B C", "B C", "A", NULL, REMOVE_ITEM },
+ { "B C D", "A B C D", NULL, "A", INSERT_HEAD },
+ { "A B C", "A B C D", NULL, "D", INSERT_TAIL },
+ { "A B C", "X A B C", "A", "X", INSERT_BEFORE },
+ { "A B C", "A X B C", "B", "X", INSERT_BEFORE },
+ { "A B C", "A B X C", "C", "X", INSERT_BEFORE },
+ { "A B C", "A X B C", "A", "X", INSERT_AFTER },
+ { "A B C", "A B X C", "B", "X", INSERT_AFTER },
+ { "A B C", "A B C X", "C", "X", INSERT_AFTER },
+};
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ void *list;
+	int fc, tc;		/* tc is total count, fc is passed count */
+ int eval, i, t, result;
+
+ eval = 0;
+ for (t = 0; t < sizeof(qfns) / sizeof(qfns[0]); ++t) {
+ fc = tc = 0;
+ printf("TESTING: %s\n", qfns[t].name);
+
+ for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
+ list = qfns[t].f_init(ops[i].init);
+ result = qfns[t].f_verify(list, ops[i].init);
+ if (result == 0) {
+ fc++;
+ putchar('.');
+ } else {
+ putchar('+'); /* + means failed before op */
+ printf("\nVerify failed: %s\n",
+ failure_reason_names[result]);
+ eval = 1;
+ }
+ if (!strcmp("sh_tailq", qfns[t].name)) {
+ result =
+ sh_t_verify_TAILQ_LAST(list, ops[i].init);
+ }
+#ifdef VERBOSE
+ printf("\ncase %d %s in %s init: \"%s\" desired: \"%s\" elem: \"%s\" insert: \"%s\"\n",
+ i, op_names[ops[i].op], qfns[t].name,
+ ops[i].init, ops[i].final,
+ ops[i].elem, ops[i].insert);
+ fflush(stdout);
+#endif
+ tc++;
+ switch (ops[i].op) {
+ case REMOVE_HEAD:
+ qfns[t].f_remove_head(list);
+ break;
+ case REMOVE_TAIL:
+ qfns[t].f_remove_tail(list);
+ break;
+ case REMOVE_ITEM:
+ qfns[t].f_remove_item(list, ops[i].elem);
+ break;
+ case INSERT_HEAD:
+ qfns[t].f_insert_head(list, ops[i].insert);
+ break;
+ case INSERT_TAIL:
+ qfns[t].f_insert_tail(list, ops[i].insert);
+ break;
+ case INSERT_BEFORE:
+ qfns[t].f_insert_before(
+ list, ops[i].insert, ops[i].elem);
+ break;
+ case INSERT_AFTER:
+ qfns[t].f_insert_after(
+ list, ops[i].insert, ops[i].elem);
+ break;
+ }
+			if (!strcmp("sh_tailq", qfns[t].name)) {
+ result = sh_t_verify_TAILQ_LAST(list,
+ ops[i].final);
+ }
+ if (result == 0)
+ result = qfns[t].f_verify(list, ops[i].final);
+ if (result == 0) {
+ fc++;
+ putchar('.');
+ } else {
+ putchar('*'); /* * means failed after op */
+ printf("\ncase %d %s in %s init: \"%s\" desired: \"%s\" elem: \"%s\" insert: \"%s\" got: %s - %s\n",
+ i, op_names[ops[i].op], qfns[t].name,
+ ops[i].init, ops[i].final,
+ ops[i].elem, ops[i].insert,
+ qfns[t].f_as_string(list),
+ failure_reason_names[result]);
+ fflush(stdout);
+ eval = 1;
+ }
+
+ tc++;
+ qfns[t].f_discard(list);
+ }
+
+ printf("\t%0.2f%% passed (%d/%d).\n",
+ (((double)fc/tc) * 100), fc, tc);
+ }
+ return (eval);
+}
diff --git a/db-4.8.30/test/scr024/Makefile b/db-4.8.30/test/scr024/Makefile
new file mode 100644
index 0000000..fe1d0c3
--- /dev/null
+++ b/db-4.8.30/test/scr024/Makefile
@@ -0,0 +1,33 @@
+TESTCLASSES=\
+ ./src/com/sleepycat/bind/serial/test/*.java\
+ ./src/com/sleepycat/bind/test/*.java\
+ ./src/com/sleepycat/bind/tuple/test/*.java\
+ ./src/com/sleepycat/collections/test/*.java\
+ ./src/com/sleepycat/collections/test/serial/*.java\
+ ./src/com/sleepycat/util/test/*.java \
+ ./src/com/sleepycat/db/util/*.java
+
+TESTSERIALPATH=com/sleepycat/collections/test/serial/TestSerial
+
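+# Typical invocation (these variables are normally supplied by the calling
+# test harness; the values below are only illustrative):
+#   make DB_JAR=../../build_unix/db.jar CP_SEP=: REQUIRED_JARS=/path/to/junit.jar
+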
+all: dbtest.jar
+
+dbtest.jar: classesdir
+ # Compile the tests and build the test jar
+ javac -classpath "${DB_JAR}${CP_SEP}${REQUIRED_JARS}" \
+ -d ./classes ${TESTCLASSES}
+ jar cf ./dbtest.jar -C ./classes ./com/sleepycat
+ # Build the original version of TestSerial in the testserial directory
+ mkdir -p "testserial/${TESTSERIALPATH}"
+ cp "./src/${TESTSERIALPATH}.java.original" \
+ "./testserial/${TESTSERIALPATH}.java"
+ javac -classpath "${DB_JAR}${CP_SEP}${REQUIRED_JARS}" \
+ -d ./testserial "testserial/${TESTSERIALPATH}.java"
+
+classesdir:
+ [ -d ./classes ] || (mkdir ./classes)
+
+clean:
+	[ ! -d ./classes ] || rm -rf ./classes
+	[ ! -f ./dbtest.jar ] || rm ./dbtest.jar
+	[ ! -d ./testserial ] || rm -rf ./testserial
+
diff --git a/db-4.8.30/test/scr024/README b/db-4.8.30/test/scr024/README
new file mode 100644
index 0000000..1053cf6
--- /dev/null
+++ b/db-4.8.30/test/scr024/README
@@ -0,0 +1,51 @@
+This directory contains unit tests for the Java APIs that are common to the
+BDB and BDB JE products: the bind, collections, persist (DPL) and util
+packages.
+
+These tests are run using Apache Ant and JUnit.
+
+External software requirements:
+
+ Java 1.5 or later.
+ The Sun JDK is normally used, but any compatible JVM should work.
+
+    Apache Ant 1.7.0 or later.
+ http://ant.apache.org/bindownload.cgi
+
+ JUnit 3.8.1 or 3.8.2 (later versions do not currently work)
+ http://prdownloads.sourceforge.net/junit/junit3.8.1.zip?download
+
+The bin directories of both Java and Ant must be in your executable path. The
+junit.jar file must be available to Ant. The simplest way to do this is to copy
+the junit.jar file into your Ant's lib directory.
+
+There are two configuration settings for specifying the DB release to be
+tested:
+
+ The release binaries must be in your system library path (LD_LIBRARY_PATH
+ on Unix).
+
+    The Java property db.jar may be specified to override the default location,
+ which is the build_unix/db.jar file in this release directory.
+
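+    For example (the path below is only illustrative):
+
+	ant -Ddb.jar=/path/to/db-4.8.30/build_unix/db.jar test
+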
+The following ant commands should be executed from this directory:
+
+ # To perform a clean build of the tests
+ ant clean build
+
+ # To build (if needed) and run the tests
+ ant test
+
+ # To test the examples
+ ant examples
+
+For example, here are commands to build and test in a local db directory.
+
+ cd ~/db/test/scr024
+ ant clean build
+ ant test
+
+Several output directories are created when building and testing. These can
+be deleted with:
+
+ ant clean
+
diff --git a/db-4.8.30/test/scr024/build.xml b/db-4.8.30/test/scr024/build.xml
new file mode 100644
index 0000000..472ea0d
--- /dev/null
+++ b/db-4.8.30/test/scr024/build.xml
@@ -0,0 +1,415 @@
+<!-- $Id$ -->
+<project name="clover" default="build" basedir=".">
+
+ <property name="db" location="../.."/>
+ <property name="db.jar" location="${db}/build_unix/db.jar"/>
+ <property name="test.src" location="src"/>
+ <property name="examples.src" location="${db}/examples_java/src"/>
+ <property name="clover.initstring" location="reports/clover.db"/>
+ <property name="clover.excludes" value="**/test/** collections/** db/** com/sleepycat/db/**"/>
+ <!--
+ <property name="build.compiler"
+ value="org.apache.tools.ant.taskdefs.CloverCompilerAdapter"/>
+ -->
+
+ <target name="all" depends="clean,test,report"/>
+
+ <target name="clean">
+ <delete dir="classes"/>
+ <delete dir="tmp"/>
+ <delete dir="reports"/>
+ <delete dir="original"/>
+ <delete dir="evolved"/>
+ <delete dir="testevolvedir"/>
+ <delete dir="testserialdir"/>
+ <delete dir="testenhancedir"/>
+ </target>
+
+ <target name="init">
+ <mkdir dir="classes"/>
+ <mkdir dir="tmp"/>
+ <mkdir dir="reports"/>
+ </target>
+
+ <path id="clover.classpath">
+ <pathelement location="clover.jar"/>
+ <pathelement location="velocity.jar"/>
+ </path>
+
+ <path id="classpath">
+ <pathelement location="${db.jar}"/>
+ <pathelement location="classes"/>
+ <pathelement location="clover.jar"/>
+ <path refid="clover.classpath"/>
+ </path>
+
+ <path id="enhanced.classpath">
+ <pathelement location="${db.jar}"/>
+ <pathelement location="testenhancedir"/>
+ </path>
+
+ <target name="build" depends="init">
+ <echo message="Using db.jar: ${db.jar}"/>
+ <javac destdir="classes" debug="on" source="1.5" target="1.5">
+ <classpath refid="classpath"/>
+ <src path="${test.src}"/>
+ <src path="${examples.src}"/>
+ <exclude name="com/sleepycat/**/release/**"/>
+ </javac>
+ <!-- Compile original version of TestSerial.java.original. -->
+ <property name="testserialpath"
+ value="com/sleepycat/collections/test/serial/TestSerial"/>
+ <copy file="${test.src}/${testserialpath}.java.original"
+ tofile="testserialdir/${testserialpath}.java"/>
+ <javac destdir="testserialdir" debug="on" source="1.5" target="1.5"
+ includeAntRuntime="true" srcdir="testserialdir">
+ <include name="${testserialpath}.java"/>
+ <classpath refid="classpath"/>
+ </javac>
+ <!-- Compile original version of EvolveClasses. -->
+ <copy file=
+ "${test.src}/com/sleepycat/persist/test/EvolveClasses.java.original"
+ tofile=
+ "testevolvedir/com/sleepycat/persist/test/EvolveClasses.java"/>
+ <copy file=
+ "${test.src}/com/sleepycat/persist/test/EvolveCase.java"
+ tofile=
+ "testevolvedir/com/sleepycat/persist/test/EvolveCase.java"/>
+ <copy file=
+ "${test.src}/com/sleepycat/persist/test/PersistTestUtils.java"
+ tofile=
+ "testevolvedir/com/sleepycat/persist/test/PersistTestUtils.java"/>
+ <javac debug="on" source="1.5" target="1.5">
+ <src path="testevolvedir"/>
+ <classpath refid="classpath"/>
+ </javac>
+ </target>
+
+ <target name="test" depends="build">
+
+ <!-- Determine which tests to run. -->
+ <condition property="dotestserial">
+ <or>
+ <not><isset property="testcase"/></not>
+ <equals arg1="${testcase}" arg2=
+ "com.sleepycat.collections.test.serial.StoredClassCatalogTest"/>
+ </or>
+ </condition>
+ <condition property="dotestevolve">
+ <or>
+ <not><isset property="testcase"/></not>
+ <equals arg1="${testcase}"
+ arg2="com.sleepycat.persist.test.EvolveTest"/>
+ </or>
+ </condition>
+
+ <!-- Performs initialization needed before StoredClassCatalogTest. -->
+ <junit fork="yes" dir="." printsummary="on" haltonfailure="on"
+ showoutput="on">
+ <jvmarg value="-ea"/>
+ <classpath path="testserialdir"/> <!-- Must be first -->
+ <classpath refid="classpath"/>
+ <sysproperty key="testdestdir" value="./tmp"/>
+ <sysproperty key="longtest" value="${longtest}"/>
+ <formatter type="plain" usefile="false"/>
+ <formatter type="xml"/>
+ <test name=
+ "com.sleepycat.collections.test.serial.StoredClassCatalogTestInit"
+ todir="reports" if="dotestserial"/>
+ </junit>
+
+ <!-- Performs initialization needed before persist EvolveTest. -->
+ <junit fork="yes" dir="." printsummary="on" haltonfailure="on"
+ showoutput="on">
+ <jvmarg value="-ea"/>
+ <classpath path="testevolvedir"/>
+ <classpath refid="classpath"/>
+ <sysproperty key="testdestdir" value="./tmp"/>
+ <sysproperty key="longtest" value="${longtest}"/>
+ <formatter type="plain" usefile="false"/>
+ <formatter type="xml"/>
+ <test name="com.sleepycat.persist.test.EvolveTestInit"
+ todir="reports" if="dotestevolve"/>
+ </junit>
+
+ <!-- Performs testcase if set, or batch tests. -->
+ <junit fork="yes" dir="." printsummary="on" haltonfailure="on"
+ showoutput="on">
+ <jvmarg value="-ea"/>
+ <classpath refid="classpath"/>
+ <sysproperty key="testdestdir" value="./tmp"/>
+ <sysproperty key="longtest" value="${longtest}"/>
+ <formatter type="plain" usefile="false"/>
+ <formatter type="xml"/>
+ <test name="${testcase}" todir="reports" if="testcase"/>
+ <batchtest todir="reports" unless="testcase">
+ <fileset dir="classes" includes="**/*Test.class"/>
+ </batchtest>
+ </junit>
+
+ <!-- Run the persist tests with -javaagent to enhance classes as they are
+ loaded. In this, and the following two tests with enhanced classes,
+ set the expectEnhanced system property to cause the test to fail if
+ classes are not enhanced. -->
+ <echo message="Run persist tests with -javaagent"/>
+ <junit fork="yes" dir="." printsummary="on" haltonfailure="on"
+ showoutput="on">
+ <jvmarg value="-ea"/>
+ <jvmarg value="-javaagent:${db.jar}=enhance:-v,com.sleepycat.persist"/>
+ <classpath refid="classpath"/>
+ <sysproperty key="testdestdir" value="./tmp"/>
+ <sysproperty key="longtest" value="${longtest}"/>
+ <sysproperty key="expectEnhanced" value="true"/>
+ <formatter type="plain" usefile="false"/>
+ <formatter type="xml"/>
+ <batchtest todir="reports" unless="testcase">
+ <fileset dir="classes"
+ includes="com/sleepycat/persist/**/*Test.class"/>
+ </batchtest>
+ </junit>
+
+ <!-- Enhance persist test classes by rewriting the class files with the
+ enhancer ant task, and run the persist tests again. Compile
+ ClassEnhancerTask here. It is not compiled into db.jar because that
+ would create a primary build dependency on the Ant libraries. -->
+ <echo message="Run persist tests with ClassEnhancer ant task"/>
+ <delete dir="testenhancedir"/>
+ <mkdir dir="testenhancedir"/>
+ <copy todir="testenhancedir">
+ <fileset dir="classes" includes="com/sleepycat/persist/**/*.class"/>
+ </copy>
+ <javac destdir="testenhancedir" debug="on" source="1.5" target="1.5"
+ includeAntRuntime="true" srcdir="${db}/java/src">
+ <include name="com/sleepycat/persist/model/ClassEnhancerTask.java"/>
+ <classpath refid="classpath"/>
+ </javac>
+ <taskdef name="enhancer"
+ classname="com.sleepycat.persist.model.ClassEnhancerTask">
+ <classpath refid="enhanced.classpath"/>
+ </taskdef>
+ <enhancer verbose="on">
+ <fileset dir="testenhancedir"/>
+ </enhancer>
+ <junit fork="yes" dir="." printsummary="on" haltonfailure="on"
+ showoutput="on">
+ <jvmarg value="-ea"/>
+ <classpath refid="enhanced.classpath"/>
+ <classpath refid="classpath"/>
+ <sysproperty key="testdestdir" value="./tmp"/>
+ <sysproperty key="longtest" value="${longtest}"/>
+ <sysproperty key="expectEnhanced" value="true"/>
+ <formatter type="plain" usefile="false"/>
+ <formatter type="xml"/>
+ <batchtest todir="reports" unless="testcase">
+ <fileset dir="classes"
+ includes="com/sleepycat/persist/**/*Test.class"/>
+ </batchtest>
+ </junit>
+
+ <!-- Enhance persist test classes by rewriting the class files with the
+ ClassEnhancer main program, and run the persist tests again. -->
+ <echo message="Run persist tests with ClassEnhancer main program"/>
+ <delete dir="testenhancedir"/>
+ <mkdir dir="testenhancedir"/>
+ <copy todir="testenhancedir">
+ <fileset dir="classes" includes="com/sleepycat/persist/**/*.class"/>
+ </copy>
+ <java fork="yes" failonerror="true"
+ classname="com.sleepycat.persist.model.ClassEnhancer">
+ <arg line="-v testenhancedir"/>
+ <classpath refid="classpath"/>
+ </java>
+ <junit fork="yes" dir="." printsummary="on" haltonfailure="on"
+ showoutput="on">
+ <jvmarg value="-ea"/>
+ <classpath refid="enhanced.classpath"/>
+ <classpath refid="classpath"/>
+ <sysproperty key="testdestdir" value="./tmp"/>
+ <sysproperty key="longtest" value="${longtest}"/>
+ <sysproperty key="expectEnhanced" value="true"/>
+ <formatter type="plain" usefile="false"/>
+ <formatter type="xml"/>
+ <batchtest todir="reports" unless="testcase">
+ <fileset dir="classes"
+ includes="com/sleepycat/persist/**/*Test.class"/>
+ </batchtest>
+ </junit>
+
+ </target>
+
+ <!-- examples runs all examples, but does not include access_example because
+ it is interactive.
+ -->
+ <target name="examples" depends="build">
+ <echo message="=== HelloDatabaseWorld ==="/>
+ <java dir="." fork="yes" classpathref="classpath" failonerror="true"
+ classname="collections.hello.HelloDatabaseWorld"/>
+ <echo message=""/>
+ <antcall target="one_shipment_example">
+ <param name="param_name" value="basic"/>
+ </antcall>
+ <antcall target="one_shipment_example">
+ <param name="param_name" value="index"/>
+ </antcall>
+ <antcall target="one_shipment_example">
+ <param name="param_name" value="entity"/>
+ </antcall>
+ <antcall target="one_shipment_example">
+ <param name="param_name" value="tuple"/>
+ </antcall>
+ <antcall target="one_shipment_example">
+ <param name="param_name" value="sentity"/>
+ </antcall>
+ <antcall target="one_shipment_example">
+ <param name="param_name" value="marshal"/>
+ </antcall>
+ <antcall target="one_shipment_example">
+ <param name="param_name" value="factory"/>
+ </antcall>
+ <antcall target="one_persist_example">
+ <param name="param_name" value="CustomKeyOrderExample"/>
+ </antcall>
+ <antcall target="one_persist_example">
+ <param name="param_name" value="EventExample"/>
+ </antcall>
+ <antcall target="one_persist_example">
+ <param name="param_name" value="EventExampleDPL"/>
+ </antcall>
+ <antcall target="one_persist_example">
+ <param name="param_name" value="PersonExample"/>
+ </antcall>
+ <antcall target="DplDump">
+ <param name="home" value="tmp"/>
+ <param name="store" value="PersonStore"/>
+ </antcall>
+ <antcall target="db-gettingStarted"/>
+ <antcall target="db-txn"/>
+ <antcall target="persist-gettingStarted"/>
+ <antcall target="persist-txn"/>
+ </target>
+
+ <target name="one_shipment_example">
+ <echo message="=== ${param_name} ==="/>
+ <delete dir="tmp"/>
+ <mkdir dir="tmp"/>
+ <java dir="." fork="yes" classpathref="classpath" failonerror="true"
+ classname="collections.ship.${param_name}.Sample"/>
+ </target>
+
+ <target name="one_persist_example">
+ <echo message="=== ${param_name} ==="/>
+ <delete dir="tmp"/>
+ <mkdir dir="tmp"/>
+ <java fork="yes" dir="." classname="persist.${param_name}"
+ failonerror="true">
+ <jvmarg value="-ea"/>
+ <arg line="-h tmp"/>
+ <classpath refid="classpath"/>
+ </java>
+ </target>
+
+ <!--
+ Before running this example, first run another DPL example that writes to the
+ home directory. Note that we do not clear the home directory here, in order
+ to use the DB in the home directory from a previous run.
+ -->
+ <target name="DplDump">
+ <echo message="=== DplDump ${home} ${store} ==="/>
+ <java fork="yes" dir="." classname="persist.DplDump" failonerror="true">
+ <jvmarg value="-ea"/>
+ <arg line="-h ${home} -s ${store}"/>
+ <classpath refid="classpath"/>
+ </java>
+ </target>
+
+ <target name="access_example" depends="build">
+ <echo message="=== AccessExample ==="/>
+ <java fork="yes" dir="." classpathref="classpath" failonerror="true"
+ classname="collections.access.AccessExample">
+ </java>
+ </target>
+
+ <!-- Test GSG examples -->
+
+ <target name="db-gettingStarted" depends="build">
+ <delete dir="tmp"/>
+ <mkdir dir="tmp"/>
+ <copy todir="tmp">
+ <fileset dir="${examples.src}/db/GettingStarted">
+ <include name="*.txt"/>
+ <exclude name="*.java"/>
+ </fileset>
+ </copy>
+ <java fork="yes" dir="tmp"
+ classname="db.GettingStarted.ExampleDatabaseLoad"
+ failonerror="true">
+ <arg line="-h ."/>
+ <classpath refid="classpath"/>
+ </java>
+ <java fork="yes" dir="tmp"
+ classname="db.GettingStarted.ExampleDatabaseRead"
+ failonerror="true">
+ <arg line="-h ."/>
+ <classpath refid="classpath"/>
+ </java>
+ <delete file="tmp/inventory.txt"/>
+ <delete file="tmp/vendors.txt"/>
+ </target>
+
+ <target name="db-txn" depends="build">
+ <delete dir="tmp"/>
+ <mkdir dir="tmp"/>
+ <java fork="yes" dir="."
+ classname="db.txn.TxnGuide"
+ failonerror="true">
+ <arg line="-h tmp"/>
+ <classpath refid="classpath"/>
+ </java>
+ </target>
+
+ <target name="persist-gettingStarted" depends="build">
+ <delete dir="tmp"/>
+ <mkdir dir="tmp"/>
+ <mkdir dir="tmp/JEDB"/>
+ <copy todir="tmp">
+ <fileset dir="${examples.src}/persist/gettingStarted">
+ <include name="*.txt"/>
+ <exclude name="*.java"/>
+ </fileset>
+ </copy>
+ <java fork="yes" dir="tmp"
+ classname="persist.gettingStarted.SimpleStorePut"
+ failonerror="true">
+ <arg line="-h ."/>
+ <classpath refid="classpath"/>
+ </java>
+ <java fork="yes" dir="tmp"
+ classname="persist.gettingStarted.SimpleStoreGet"
+ failonerror="true">
+ <arg line="-h ."/>
+ <classpath refid="classpath"/>
+ </java>
+ <delete file="tmp/inventory.txt"/>
+ <delete file="tmp/vendors.txt"/>
+ </target>
+
+ <target name="persist-txn" depends="build">
+ <delete dir="tmp"/>
+ <mkdir dir="tmp"/>
+ <java fork="yes" dir="."
+ classname="persist.txn.TxnGuideDPL"
+ failonerror="true">
+ <arg line="-h tmp"/>
+ <classpath refid="classpath"/>
+ </java>
+ </target>
+
+ <target name="report">
+ <java classname="com.cortexeb.tools.clover.reporters.html.HtmlReporter"
+ failonerror="true" fork="true">
+ <arg line="--outputdir reports --showSrc --initstring ${clover.initstring} --title 'Berkeley DB Java BDB API'"/>
+ <classpath refid="clover.classpath"/>
+ </java>
+ </target>
+
+</project>
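A note on the three enhancement passes exercised by the "test" target above (-javaagent, the ClassEnhancerTask ant task, and the ClassEnhancer main program): each of them rewrites the bytecode of Direct Persistence Layer classes under com.sleepycat.persist, and the expectEnhanced system property lets the tests assert that enhancement actually happened. The sketch below shows the kind of class the enhancer operates on; it is illustrative only and not part of the test suite. The @Entity and @PrimaryKey annotations are the standard com.sleepycat.persist.model API, while the class and field names are made up.

    import com.sleepycat.persist.model.Entity;
    import com.sleepycat.persist.model.PrimaryKey;

    // Illustrative DPL entity class. The enhancer (agent, ant task, or main
    // program) adds generated accessor bytecode to classes like this one so
    // the store can avoid reflection at runtime.
    @Entity
    public class ExamplePerson {

        @PrimaryKey
        private long id;

        private String name;

        private ExamplePerson() {}  // no-argument constructor required by the DPL

        public ExamplePerson(long id, String name) {
            this.id = id;
            this.name = name;
        }
    }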
diff --git a/db-4.8.30/test/scr024/chk.bdb b/db-4.8.30/test/scr024/chk.bdb
new file mode 100644
index 0000000..3353525
--- /dev/null
+++ b/db-4.8.30/test/scr024/chk.bdb
@@ -0,0 +1,81 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Run the collections/bind test suite.
+
+# NOTES:
+# This test requires one JAR not included with the Berkeley DB
+# distribution: JUnit (junit.jar). I've been using the 8/31/2002 version
+# of JUnit. You can download this JAR from http://jakarta.apache.org/
+#
+# JUNIT_JAR=/Users/gburd/Unix/opt/junit/junit.jar
+
+[ "x$JUNIT_JAR" = "x" ] && {
+ echo 'FAIL: unset environment variable JUNIT_JAR for junit.jar.'
+ exit 1
+}
+
+[ -f $JUNIT_JAR ] || {
+ echo 'FAIL: JUNIT_JAR not a valid path to the junit.jar.'
+ exit 1
+}
+
+case `uname` in
+ *CYGWIN*)
+ d=../../build_windows/Win32/Debug
+ DB_LIB_DIR="$d"
+ REQUIRED_JARS="`cygpath -m $JUNIT_JAR`"
+ CP_SEP=";"
+ PATH="../../build_windows/Win32/Debug:$PATH"
+ export PATH;;
+ *)
+ d=../../build_unix
+ REQUIRED_JARS=$JUNIT_JAR
+ DB_LIB_DIR="$d/.libs"
+ CP_SEP=":"
+ ;;
+esac
+TESTDESTDIR=${TESTDESTDIR:-testdestdir}
+DB_JAR="$d/db.jar"
+export DB_JAR
+export REQUIRED_JARS
+export CP_SEP
+
+# Build the tests.
+
+make clean
+
+[ -f ./dbtest.jar ] || (make dbtest.jar) || {
+ echo 'FAIL: unable to find or build dbtest.jar'
+ exit 1
+}
+
+# Perform initialization needed before StoredClassCatalogTest is run in
+# the tests below. The testserial directory must be first in the classpath.
+
+c="com.sleepycat.collections.test.serial.StoredClassCatalogTestInit"
+echo "Running: $c"
+if java -Djava.library.path=$DB_LIB_DIR -Dtestdestdir=$TESTDESTDIR \
+ -cp "testserial${CP_SEP}$REQUIRED_JARS${CP_SEP}$DB_JAR${CP_SEP}./dbtest.jar" $c ; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+# Run the tests.
+
+for f in `find classes -name "*Test.class"`; do
+ c=`echo "$f" | sed -e 's/classes\///' -e 's/\.class//' -e 's/\//./g'`
+ echo "Running: $c"
+ if java -Djava.library.path=$DB_LIB_DIR -Dtestdestdir=$TESTDESTDIR\
+ -cp $REQUIRED_JARS${CP_SEP}$DB_JAR${CP_SEP}./dbtest.jar $c ; then
+ :
+ else
+ echo "FAIL: test program failed"
+ exit 1
+ fi
+done
+
+exit 0
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/MarshalledObject.java b/db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/MarshalledObject.java
new file mode 100644
index 0000000..b03584a
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/MarshalledObject.java
@@ -0,0 +1,127 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.bind.serial.test;
+
+import java.io.Serializable;
+
+import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * @author Mark Hayes
+ */
+@SuppressWarnings("serial")
+public class MarshalledObject
+ implements Serializable, MarshalledTupleKeyEntity {
+
+ private String data;
+ private transient String primaryKey;
+ private String indexKey1;
+ private String indexKey2;
+
+ public MarshalledObject(String data, String primaryKey,
+ String indexKey1, String indexKey2) {
+ this.data = data;
+ this.primaryKey = primaryKey;
+ this.indexKey1 = indexKey1;
+ this.indexKey2 = indexKey2;
+ }
+
+ public boolean equals(Object o) {
+
+ try {
+ MarshalledObject other = (MarshalledObject) o;
+
+ return this.data.equals(other.data) &&
+ this.primaryKey.equals(other.primaryKey) &&
+ this.indexKey1.equals(other.indexKey1) &&
+ this.indexKey2.equals(other.indexKey2);
+ } catch (Exception e) {
+ return false;
+ }
+ }
+
+ public String getData() {
+
+ return data;
+ }
+
+ public String getPrimaryKey() {
+
+ return primaryKey;
+ }
+
+ public String getIndexKey1() {
+
+ return indexKey1;
+ }
+
+ public String getIndexKey2() {
+
+ return indexKey2;
+ }
+
+ public int expectedKeyLength() {
+
+ return primaryKey.length() + 1;
+ }
+
+ public void marshalPrimaryKey(TupleOutput keyOutput) {
+
+ keyOutput.writeString(primaryKey);
+ }
+
+ public void unmarshalPrimaryKey(TupleInput keyInput) {
+
+ primaryKey = keyInput.readString();
+ }
+
+ public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
+
+ if ("1".equals(keyName)) {
+ if (indexKey1.length() > 0) {
+ keyOutput.writeString(indexKey1);
+ return true;
+ } else {
+ return false;
+ }
+ } else if ("2".equals(keyName)) {
+ if (indexKey2.length() > 0) {
+ keyOutput.writeString(indexKey2);
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ throw new IllegalArgumentException("Unknown keyName: " + keyName);
+ }
+ }
+
+ public boolean nullifyForeignKey(String keyName) {
+
+ if ("1".equals(keyName)) {
+ if (indexKey1.length() > 0) {
+ indexKey1 = "";
+ return true;
+ } else {
+ return false;
+ }
+ } else if ("2".equals(keyName)) {
+ if (indexKey2.length() > 0) {
+ indexKey2 = "";
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ throw new IllegalArgumentException("Unknown keyName: " + keyName);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/NullClassCatalog.java b/db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/NullClassCatalog.java
new file mode 100644
index 0000000..a8ad23e
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/NullClassCatalog.java
@@ -0,0 +1,37 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.bind.serial.test;
+
+import java.io.ObjectStreamClass;
+import java.math.BigInteger;
+
+import com.sleepycat.bind.serial.ClassCatalog;
+
+/**
+ * NullCatalog is a dummy Catalog implementation that simply
+ * returns large (8 byte) class IDs so that ObjectOutput
+ * can be simulated when computing a serialized size.
+ *
+ * @author Mark Hayes
+ */
+class NullClassCatalog implements ClassCatalog {
+
+ private long id = Long.MAX_VALUE;
+
+ public void close() {
+ }
+
+ public byte[] getClassID(ObjectStreamClass classFormat) {
+ return BigInteger.valueOf(id--).toByteArray();
+ }
+
+ public ObjectStreamClass getClassFormat(byte[] classID) {
+ return null; // ObjectInput not supported
+ }
+}
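NullClassCatalog returns null from getClassFormat, so it is only useful on the write path, when measuring how large a serialized object would be. A hedged sketch of that use follows, assuming a helper in the same com.sleepycat.bind.serial.test package; SerialOutput, FastOutputStream and getStreamHeader are the existing API, while the helper class and method are hypothetical.

    import java.io.IOException;

    import com.sleepycat.bind.serial.SerialOutput;
    import com.sleepycat.util.FastOutputStream;

    class SerializedSizeSketch {

        /* Serialized size of an object, excluding the fixed stream header,
         * computed without consulting a real class catalog. */
        static int serializedSize(Object obj) throws IOException {
            FastOutputStream fo = new FastOutputStream();
            SerialOutput out = new SerialOutput(fo, new NullClassCatalog());
            out.writeObject(obj);
            out.flush();
            return fo.toByteArray().length -
                   SerialOutput.getStreamHeader().length;
        }
    }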
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/SerialBindingTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/SerialBindingTest.java
new file mode 100644
index 0000000..a9ddbed
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/SerialBindingTest.java
@@ -0,0 +1,330 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.bind.serial.test;
+
+import java.io.Serializable;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.SerialSerialBinding;
+import com.sleepycat.bind.serial.TupleSerialMarshalledBinding;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.util.ExceptionUnwrapper;
+import com.sleepycat.util.FastOutputStream;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * @author Mark Hayes
+ */
+public class SerialBindingTest extends TestCase {
+
+ private ClassCatalog catalog;
+ private DatabaseEntry buffer;
+ private DatabaseEntry keyBuffer;
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite(SerialBindingTest.class);
+ return suite;
+ }
+
+ public SerialBindingTest(String name) {
+
+ super(name);
+ }
+
+ @Override
+ public void setUp() {
+
+ SharedTestUtils.printTestName("SerialBindingTest." + getName());
+ catalog = new TestClassCatalog();
+ buffer = new DatabaseEntry();
+ keyBuffer = new DatabaseEntry();
+ }
+
+ @Override
+ public void tearDown() {
+
+ /* Ensure that GC can cleanup. */
+ catalog = null;
+ buffer = null;
+ keyBuffer = null;
+ }
+
+ @Override
+ public void runTest()
+ throws Throwable {
+
+ try {
+ super.runTest();
+ } catch (Exception e) {
+ throw ExceptionUnwrapper.unwrap(e);
+ }
+ }
+
+ private void primitiveBindingTest(Object val) {
+
+ Class cls = val.getClass();
+ SerialBinding binding = new SerialBinding(catalog, cls);
+
+ binding.objectToEntry(val, buffer);
+ assertTrue(buffer.getSize() > 0);
+
+ Object val2 = binding.entryToObject(buffer);
+ assertSame(cls, val2.getClass());
+ assertEquals(val, val2);
+
+ Object valWithWrongCls = (cls == String.class)
+ ? ((Object) new Integer(0)) : ((Object) new String(""));
+ try {
+ binding.objectToEntry(valWithWrongCls, buffer);
+ } catch (IllegalArgumentException expected) {}
+ }
+
+ public void testPrimitiveBindings() {
+
+ primitiveBindingTest("abc");
+ primitiveBindingTest(new Character('a'));
+ primitiveBindingTest(new Boolean(true));
+ primitiveBindingTest(new Byte((byte) 123));
+ primitiveBindingTest(new Short((short) 123));
+ primitiveBindingTest(new Integer(123));
+ primitiveBindingTest(new Long(123));
+ primitiveBindingTest(new Float(123.123));
+ primitiveBindingTest(new Double(123.123));
+ }
+
+ public void testNullObjects() {
+
+ SerialBinding binding = new SerialBinding(catalog, null);
+ buffer.setSize(0);
+ binding.objectToEntry(null, buffer);
+ assertTrue(buffer.getSize() > 0);
+ assertEquals(null, binding.entryToObject(buffer));
+ }
+
+ public void testSerialSerialBinding() {
+
+ SerialBinding keyBinding = new SerialBinding(catalog, String.class);
+ SerialBinding valueBinding = new SerialBinding(catalog, String.class);
+ EntityBinding binding = new MySerialSerialBinding(keyBinding,
+ valueBinding);
+
+ String val = "key#value?indexKey";
+ binding.objectToData(val, buffer);
+ assertTrue(buffer.getSize() > 0);
+ binding.objectToKey(val, keyBuffer);
+ assertTrue(keyBuffer.getSize() > 0);
+
+ Object result = binding.entryToObject(keyBuffer, buffer);
+ assertEquals(val, result);
+ }
+
+ // also tests TupleSerialBinding since TupleSerialMarshalledBinding extends
+ // it
+ public void testTupleSerialMarshalledBinding() {
+
+ SerialBinding valueBinding = new SerialBinding(catalog,
+ MarshalledObject.class);
+ EntityBinding binding =
+ new TupleSerialMarshalledBinding(valueBinding);
+
+ MarshalledObject val = new MarshalledObject("abc", "primary",
+ "index1", "index2");
+ binding.objectToData(val, buffer);
+ assertTrue(buffer.getSize() > 0);
+ binding.objectToKey(val, keyBuffer);
+ assertEquals(val.expectedKeyLength(), keyBuffer.getSize());
+
+ Object result = binding.entryToObject(keyBuffer, buffer);
+ assertTrue(result instanceof MarshalledObject);
+ val = (MarshalledObject) result;
+ assertEquals("abc", val.getData());
+ assertEquals("primary", val.getPrimaryKey());
+ assertEquals("index1", val.getIndexKey1());
+ assertEquals("index2", val.getIndexKey2());
+ }
+
+ public void testBufferSize() {
+
+ CaptureSizeBinding binding =
+ new CaptureSizeBinding(catalog, String.class);
+
+ binding.objectToEntry("x", buffer);
+ assertEquals("x", binding.entryToObject(buffer));
+ assertEquals(FastOutputStream.DEFAULT_INIT_SIZE, binding.bufSize);
+
+ binding.setSerialBufferSize(1000);
+ binding.objectToEntry("x", buffer);
+ assertEquals("x", binding.entryToObject(buffer));
+ assertEquals(1000, binding.bufSize);
+ }
+
+ private static class CaptureSizeBinding extends SerialBinding {
+
+ int bufSize;
+
+ CaptureSizeBinding(ClassCatalog classCatalog, Class baseClass) {
+ super(classCatalog, baseClass);
+ }
+
+ @Override
+ public FastOutputStream getSerialOutput(Object object) {
+ FastOutputStream fos = super.getSerialOutput(object);
+ bufSize = fos.getBufferBytes().length;
+ return fos;
+ }
+ }
+
+ public void testBufferOverride() {
+
+ FastOutputStream out = new FastOutputStream(10);
+ CachedOutputBinding binding =
+ new CachedOutputBinding(catalog, String.class, out);
+
+ binding.used = false;
+ binding.objectToEntry("x", buffer);
+ assertEquals("x", binding.entryToObject(buffer));
+ assertTrue(binding.used);
+
+ binding.used = false;
+ binding.objectToEntry("aaaaaaaaaaaaaaaaaaaaaa", buffer);
+ assertEquals("aaaaaaaaaaaaaaaaaaaaaa", binding.entryToObject(buffer));
+ assertTrue(binding.used);
+
+ binding.used = false;
+ binding.objectToEntry("x", buffer);
+ assertEquals("x", binding.entryToObject(buffer));
+ assertTrue(binding.used);
+ }
+
+ private static class CachedOutputBinding extends SerialBinding {
+
+ FastOutputStream out;
+ boolean used;
+
+ CachedOutputBinding(ClassCatalog classCatalog,
+ Class baseClass,
+ FastOutputStream out) {
+ super(classCatalog, baseClass);
+ this.out = out;
+ }
+
+ @Override
+ public FastOutputStream getSerialOutput(Object object) {
+ out.reset();
+ used = true;
+ return out;
+ }
+ }
+
+ private static class MySerialSerialBinding extends SerialSerialBinding {
+
+ private MySerialSerialBinding(SerialBinding keyBinding,
+ SerialBinding valueBinding) {
+
+ super(keyBinding, valueBinding);
+ }
+
+ @Override
+ public Object entryToObject(Object keyInput, Object valueInput) {
+
+ return "" + keyInput + '#' + valueInput;
+ }
+
+ @Override
+ public Object objectToKey(Object object) {
+
+ String s = (String) object;
+ int i = s.indexOf('#');
+ if (i < 0 || i == s.length() - 1) {
+ throw new IllegalArgumentException(s);
+ } else {
+ return s.substring(0, i);
+ }
+ }
+
+ @Override
+ public Object objectToData(Object object) {
+
+ String s = (String) object;
+ int i = s.indexOf('#');
+ if (i < 0 || i == s.length() - 1) {
+ throw new IllegalArgumentException(s);
+ } else {
+ return s.substring(i + 1);
+ }
+ }
+ }
+
+ /**
+ * Tests that overriding SerialBinding.getClassLoader is possible. This is
+ * a crude test because creating a truly working class loader is a large
+ * undertaking.
+ */
+ public void testClassloaderOverride() {
+ DatabaseEntry entry = new DatabaseEntry();
+
+ SerialBinding binding = new CustomLoaderBinding
+ (catalog, null, new FailureClassLoader());
+
+ try {
+ binding.objectToEntry(new MyClass(), entry);
+ binding.entryToObject(entry);
+ fail();
+ } catch (RuntimeException e) {
+ assertTrue(e.getMessage().startsWith("expect failure"));
+ }
+ }
+
+ private static class CustomLoaderBinding extends SerialBinding {
+
+ private final ClassLoader loader;
+
+ CustomLoaderBinding(ClassCatalog classCatalog,
+ Class baseClass,
+ ClassLoader loader) {
+
+ super(classCatalog, baseClass);
+ this.loader = loader;
+ }
+
+ @Override
+ public ClassLoader getClassLoader() {
+ return loader;
+ }
+ }
+
+ private static class FailureClassLoader extends ClassLoader {
+
+ @Override
+ public Class loadClass(String name) {
+ throw new RuntimeException("expect failure: " + name);
+ }
+ }
+
+ @SuppressWarnings("serial")
+ private static class MyClass implements Serializable {
+ }
+}
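SerialBindingTest drives SerialBinding with the in-memory TestClassCatalog defined below. Outside the test suite the catalog is normally a StoredClassCatalog persisted in its own Database; the hedged sketch that follows shows that wiring. The SerialBinding and StoredClassCatalog calls are the documented com.sleepycat.bind.serial API, while the helper class and method names are hypothetical.

    import com.sleepycat.bind.serial.SerialBinding;
    import com.sleepycat.bind.serial.StoredClassCatalog;
    import com.sleepycat.db.Database;
    import com.sleepycat.db.DatabaseEntry;
    import com.sleepycat.db.DatabaseException;

    class SerialBindingSketch {

        /* Encodes a String value through a catalog stored in catalogDb,
         * which is how SerialBinding is typically wired up in applications. */
        static DatabaseEntry encode(Database catalogDb, String value)
            throws DatabaseException {

            StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);
            SerialBinding binding = new SerialBinding(catalog, String.class);
            DatabaseEntry entry = new DatabaseEntry();
            binding.objectToEntry(value, entry);
            return entry;
        }
    }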
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/TestClassCatalog.java b/db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/TestClassCatalog.java
new file mode 100644
index 0000000..5311302
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/bind/serial/test/TestClassCatalog.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.bind.serial.test;
+
+import java.io.ObjectStreamClass;
+import java.util.HashMap;
+
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.db.DatabaseException;
+
+/**
+ * @author Mark Hayes
+ */
+public class TestClassCatalog implements ClassCatalog {
+
+ private final HashMap idToDescMap = new HashMap();
+ private final HashMap nameToIdMap = new HashMap();
+ private int nextId = 1;
+
+ public TestClassCatalog() {
+ }
+
+ public void close() {
+ }
+
+ public synchronized byte[] getClassID(ObjectStreamClass desc) {
+ String className = desc.getName();
+ byte[] id = (byte[]) nameToIdMap.get(className);
+ if (id == null) {
+ String strId = String.valueOf(nextId);
+ id = strId.getBytes();
+ nextId += 1;
+
+ idToDescMap.put(strId, desc);
+ nameToIdMap.put(className, id);
+ }
+ return id;
+ }
+
+ public synchronized ObjectStreamClass getClassFormat(byte[] id)
+ throws DatabaseException {
+
+ String strId = new String(id);
+ ObjectStreamClass desc = (ObjectStreamClass) idToDescMap.get(strId);
+ if (desc == null) {
+ throw new DatabaseException("classID not found");
+ }
+ return desc;
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/bind/test/BindingSpeedTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/bind/test/BindingSpeedTest.java
new file mode 100644
index 0000000..8bf52d5
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/bind/test/BindingSpeedTest.java
@@ -0,0 +1,484 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.bind.test;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Serializable;
+import java.io.Writer;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+
+import javax.xml.parsers.SAXParserFactory;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import org.xml.sax.InputSource;
+import org.xml.sax.XMLReader;
+
+import com.sleepycat.bind.serial.SerialInput;
+import com.sleepycat.bind.serial.SerialOutput;
+import com.sleepycat.bind.serial.test.TestClassCatalog;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.util.FastInputStream;
+import com.sleepycat.util.FastOutputStream;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * @author Mark Hayes
+ */
+public class BindingSpeedTest extends TestCase {
+
+ static final String JAVA_UNSHARED = "java-unshared".intern();
+ static final String JAVA_SHARED = "java-shared".intern();
+ static final String JAVA_EXTERNALIZABLE = "java-externalizable".intern();
+ static final String XML_SAX = "xml-sax".intern();
+ static final String TUPLE = "tuple".intern();
+ static final String REFLECT_METHOD = "reflectMethod".intern();
+ static final String REFLECT_FIELD = "reflectField".intern();
+
+ static final int RUN_COUNT = 1000;
+ static final boolean VERBOSE = false;
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+
+ TestSuite suite = new TestSuite();
+ suite.addTest(new BindingSpeedTest(JAVA_UNSHARED));
+ suite.addTest(new BindingSpeedTest(JAVA_SHARED));
+ suite.addTest(new BindingSpeedTest(JAVA_EXTERNALIZABLE));
+ suite.addTest(new BindingSpeedTest(XML_SAX));
+ suite.addTest(new BindingSpeedTest(TUPLE));
+ suite.addTest(new BindingSpeedTest(REFLECT_METHOD));
+ suite.addTest(new BindingSpeedTest(REFLECT_FIELD));
+ return suite;
+ }
+
+ private String command;
+ private FastOutputStream fo;
+ private TupleOutput to;
+ private TestClassCatalog jtc;
+ private byte[] buf;
+ private XMLReader parser;
+ private Method[] getters;
+ private Method[] setters;
+ private Field[] fields;
+
+ public BindingSpeedTest(String name) {
+
+ super("BindingSpeedTest." + name);
+ command = name;
+ }
+
+ @Override
+ public void runTest()
+ throws Exception {
+
+ SharedTestUtils.printTestName(getName());
+
+ boolean isTuple = false;
+ boolean isReflectMethod = false;
+ boolean isReflectField = false;
+ boolean isXmlSax = false;
+ boolean isSerial = false;
+ boolean isShared = false;
+ boolean isExternalizable = false;
+
+ if (command == TUPLE) {
+ isTuple = true;
+ } else if (command == REFLECT_METHOD) {
+ isReflectMethod = true;
+ } else if (command == REFLECT_FIELD) {
+ isReflectField = true;
+ } else if (command == XML_SAX) {
+ isXmlSax = true;
+ } else if (command == JAVA_UNSHARED) {
+ isSerial = true;
+ } else if (command == JAVA_SHARED) {
+ isSerial = true;
+ isShared = true;
+ } else if (command == JAVA_EXTERNALIZABLE) {
+ isSerial = true;
+ isShared = true;
+ isExternalizable = true;
+ } else {
+ throw new Exception("invalid command: " + command);
+ }
+
+ // Do initialization
+
+ if (isTuple) {
+ initTuple();
+ } else if (isReflectMethod) {
+ initReflectMethod();
+ } else if (isReflectField) {
+ initReflectField();
+ } else if (isXmlSax) {
+ initXmlSax();
+ } else if (isSerial) {
+ if (isShared) {
+ initSerialShared();
+ } else {
+ initSerialUnshared();
+ }
+ }
+
+ // Prime the Java compiler
+
+ int size = 0;
+ for (int i = 0; i < RUN_COUNT; i += 1) {
+
+ if (isTuple) {
+ size = runTuple();
+ } else if (isReflectMethod) {
+ size = runReflectMethod();
+ } else if (isReflectField) {
+ size = runReflectField();
+ } else if (isXmlSax) {
+ size = runXmlSax();
+ } else if (isSerial) {
+ if (isShared) {
+ if (isExternalizable) {
+ size = runSerialExternalizable();
+ } else {
+ size = runSerialShared();
+ }
+ } else {
+ size = runSerialUnshared();
+ }
+ }
+ }
+
+ // Then run the timing tests
+
+ long startTime = System.currentTimeMillis();
+
+ for (int i = 0; i < RUN_COUNT; i += 1) {
+ if (isTuple) {
+ size = runTuple();
+ } else if (isReflectMethod) {
+ size = runReflectMethod();
+ } else if (isReflectField) {
+ size = runReflectField();
+ } else if (isXmlSax) {
+ size = runXmlSax();
+ } else if (isSerial) {
+ if (isShared) {
+ if (isExternalizable) {
+ size = runSerialExternalizable();
+ } else {
+ size = runSerialShared();
+ }
+ } else {
+ size = runSerialUnshared();
+ }
+ }
+ }
+
+ long stopTime = System.currentTimeMillis();
+
+ assertTrue("data size too big", size < 250);
+
+ if (VERBOSE) {
+ System.out.println(command);
+ System.out.println("data size: " + size);
+ System.out.println("run time: " +
+ ((stopTime - startTime) / (double) RUN_COUNT));
+ }
+ }
+
+ @Override
+ public void tearDown() {
+
+ /* Ensure that GC can cleanup. */
+ command = null;
+ fo = null;
+ to = null;
+ jtc = null;
+ buf = null;
+ parser = null;
+ }
+
+ void initSerialUnshared() {
+ fo = new FastOutputStream();
+ }
+
+ int runSerialUnshared()
+ throws Exception {
+
+ fo.reset();
+ ObjectOutputStream oos = new ObjectOutputStream(fo);
+ oos.writeObject(new Data());
+ byte[] bytes = fo.toByteArray();
+ FastInputStream fi = new FastInputStream(bytes);
+ ObjectInputStream ois = new ObjectInputStream(fi);
+ ois.readObject();
+ return bytes.length;
+ }
+
+ void initSerialShared() {
+ jtc = new TestClassCatalog();
+ fo = new FastOutputStream();
+ }
+
+ int runSerialShared()
+ throws Exception {
+
+ fo.reset();
+ SerialOutput oos = new SerialOutput(fo, jtc);
+ oos.writeObject(new Data());
+ byte[] bytes = fo.toByteArray();
+ FastInputStream fi = new FastInputStream(bytes);
+ SerialInput ois = new SerialInput(fi, jtc);
+ ois.readObject();
+ return (bytes.length - SerialOutput.getStreamHeader().length);
+ }
+
+ int runSerialExternalizable()
+ throws Exception {
+
+ fo.reset();
+ SerialOutput oos = new SerialOutput(fo, jtc);
+ oos.writeObject(new Data2());
+ byte[] bytes = fo.toByteArray();
+ FastInputStream fi = new FastInputStream(bytes);
+ SerialInput ois = new SerialInput(fi, jtc);
+ ois.readObject();
+ return (bytes.length - SerialOutput.getStreamHeader().length);
+ }
+
+ void initTuple() {
+ buf = new byte[500];
+ to = new TupleOutput(buf);
+ }
+
+ int runTuple() {
+ to.reset();
+ new Data().writeTuple(to);
+
+ TupleInput ti = new TupleInput(
+ to.getBufferBytes(), to.getBufferOffset(),
+ to.getBufferLength());
+ new Data().readTuple(ti);
+
+ return to.getBufferLength();
+ }
+
+ void initReflectMethod()
+ throws Exception {
+
+ initTuple();
+
+ Class cls = Data.class;
+
+ getters = new Method[5];
+ getters[0] = cls.getMethod("getField1", new Class[0]);
+ getters[1] = cls.getMethod("getField2", new Class[0]);
+ getters[2] = cls.getMethod("getField3", new Class[0]);
+ getters[3] = cls.getMethod("getField4", new Class[0]);
+ getters[4] = cls.getMethod("getField5", new Class[0]);
+
+ setters = new Method[5];
+ setters[0] = cls.getMethod("setField1", new Class[] {String.class});
+ setters[1] = cls.getMethod("setField2", new Class[] {String.class});
+ setters[2] = cls.getMethod("setField3", new Class[] {Integer.TYPE});
+ setters[3] = cls.getMethod("setField4", new Class[] {Integer.TYPE});
+ setters[4] = cls.getMethod("setField5", new Class[] {String.class});
+ }
+
+ int runReflectMethod()
+ throws Exception {
+
+ to.reset();
+ Data data = new Data();
+ to.writeString((String) getters[0].invoke(data, (Object[]) null));
+ to.writeString((String) getters[1].invoke(data, (Object[]) null));
+ to.writeInt(((Integer) getters[2].invoke(data, (Object[]) null)).intValue());
+ to.writeInt(((Integer) getters[3].invoke(data, (Object[]) null)).intValue());
+ to.writeString((String) getters[4].invoke(data, (Object[]) null));
+
+ TupleInput ti = new TupleInput(
+ to.getBufferBytes(), to.getBufferOffset(),
+ to.getBufferLength());
+ data = new Data();
+ setters[0].invoke(data, new Object[] {ti.readString()});
+ setters[1].invoke(data, new Object[] {ti.readString()});
+ setters[2].invoke(data, new Object[] {new Integer(ti.readInt())});
+ setters[3].invoke(data, new Object[] {new Integer(ti.readInt())});
+ setters[4].invoke(data, new Object[] {ti.readString()});
+
+ return to.getBufferLength();
+ }
+
+ void initReflectField()
+ throws Exception {
+
+ initTuple();
+
+ Class cls = Data.class;
+
+ fields = new Field[5];
+ fields[0] = cls.getField("field1");
+ fields[1] = cls.getField("field2");
+ fields[2] = cls.getField("field3");
+ fields[3] = cls.getField("field4");
+ fields[4] = cls.getField("field5");
+ }
+
+ int runReflectField()
+ throws Exception {
+
+ to.reset();
+ Data data = new Data();
+ to.writeString((String) fields[0].get(data));
+ to.writeString((String) fields[1].get(data));
+ to.writeInt(((Integer) fields[2].get(data)).intValue());
+ to.writeInt(((Integer) fields[3].get(data)).intValue());
+ to.writeString((String) fields[4].get(data));
+
+ TupleInput ti = new TupleInput(
+ to.getBufferBytes(), to.getBufferOffset(),
+ to.getBufferLength());
+ data = new Data();
+ fields[0].set(data, ti.readString());
+ fields[1].set(data, ti.readString());
+ fields[2].set(data, new Integer(ti.readInt()));
+ fields[3].set(data, new Integer(ti.readInt()));
+ fields[4].set(data, ti.readString());
+
+ return to.getBufferLength();
+ }
+
+ void initXmlSax()
+ throws Exception {
+
+ buf = new byte[500];
+ fo = new FastOutputStream();
+ SAXParserFactory saxFactory = SAXParserFactory.newInstance();
+ saxFactory.setNamespaceAware(true);
+ parser = saxFactory.newSAXParser().getXMLReader();
+ }
+
+ int runXmlSax()
+ throws Exception {
+
+ fo.reset();
+ OutputStreamWriter writer = new OutputStreamWriter(fo);
+ new Data().writeXmlText(writer);
+
+ byte[] bytes = fo.toByteArray();
+ FastInputStream fi = new FastInputStream(bytes);
+ InputSource input = new InputSource(fi);
+ parser.parse(input);
+
+ //InputStreamReader reader = new InputStreamReader(fi);
+ //new Data().readXmlText(??);
+
+ return bytes.length;
+ }
+
+ static class Data2 extends Data implements Externalizable {
+
+ public Data2() {}
+
+ public void readExternal(ObjectInput in)
+ throws IOException {
+
+ field1 = in.readUTF();
+ field2 = in.readUTF();
+ field3 = in.readInt();
+ field4 = in.readInt();
+ field5 = in.readUTF();
+ }
+
+ public void writeExternal(ObjectOutput out)
+ throws IOException {
+
+ out.writeUTF(field1);
+ out.writeUTF(field2);
+ out.writeInt(field3);
+ out.writeInt(field4);
+ out.writeUTF(field5);
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static class Data implements Serializable {
+
+ public String field1 = "field1";
+ public String field2 = "field2";
+ public int field3 = 333;
+ public int field4 = 444;
+ public String field5 = "field5";
+
+ public String getField1() { return field1; }
+ public String getField2() { return field2; }
+ public int getField3() { return field3; }
+ public int getField4() { return field4; }
+ public String getField5() { return field5; }
+
+ public void setField1(String v) { field1 = v; }
+ public void setField2(String v) { field2 = v; }
+ public void setField3(int v) { field3 = v; }
+ public void setField4(int v) { field4 = v; }
+ public void setField5(String v) { field5 = v; }
+
+ void readTuple(TupleInput _input) {
+
+ field1 = _input.readString();
+ field2 = _input.readString();
+ field3 = _input.readInt();
+ field4 = _input.readInt();
+ field5 = _input.readString();
+ }
+
+ void writeTuple(TupleOutput _output) {
+
+ _output.writeString(field1);
+ _output.writeString(field2);
+ _output.writeInt(field3);
+ _output.writeInt(field4);
+ _output.writeString(field5);
+ }
+
+ void writeXmlText(Writer writer) throws IOException {
+
+ writer.write("<Data><Field1>");
+ writer.write(field1);
+ writer.write("</Field1><Field2>");
+ writer.write(field2);
+ writer.write("</Field2><Field3>");
+ writer.write(String.valueOf(field3));
+ writer.write("</Field3><Field4>");
+ writer.write(String.valueOf(field4));
+ writer.write("</Field4><Field5>");
+ writer.write(field5);
+ writer.write("</Field5></Data>");
+ writer.flush();
+ }
+ }
+}
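BindingSpeedTest writes the tuple format by hand through TupleOutput in runTuple. In application code the same read/write logic normally lives in a TupleBinding subclass; a hedged sketch is below. The binding class is hypothetical, and it assumes it sits in the same package as BindingSpeedTest so the package-private readTuple and writeTuple methods of Data are visible.

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    // Hypothetical TupleBinding for BindingSpeedTest.Data.
    class DataTupleBinding extends TupleBinding {

        @Override
        public Object entryToObject(TupleInput input) {
            BindingSpeedTest.Data data = new BindingSpeedTest.Data();
            data.readTuple(input);
            return data;
        }

        @Override
        public void objectToEntry(Object object, TupleOutput output) {
            ((BindingSpeedTest.Data) object).writeTuple(output);
        }
    }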
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/MarshalledObject.java b/db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/MarshalledObject.java
new file mode 100644
index 0000000..bf98785
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/MarshalledObject.java
@@ -0,0 +1,136 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.bind.tuple.test;
+
+import com.sleepycat.bind.tuple.MarshalledTupleEntry;
+import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * @author Mark Hayes
+ */
+public class MarshalledObject
+ implements MarshalledTupleEntry, MarshalledTupleKeyEntity {
+
+ private String data;
+ private String primaryKey;
+ private String indexKey1;
+ private String indexKey2;
+
+ public MarshalledObject() {
+ }
+
+ MarshalledObject(String data, String primaryKey,
+ String indexKey1, String indexKey2) {
+
+ this.data = data;
+ this.primaryKey = primaryKey;
+ this.indexKey1 = indexKey1;
+ this.indexKey2 = indexKey2;
+ }
+
+ String getData() {
+
+ return data;
+ }
+
+ String getPrimaryKey() {
+
+ return primaryKey;
+ }
+
+ String getIndexKey1() {
+
+ return indexKey1;
+ }
+
+ String getIndexKey2() {
+
+ return indexKey2;
+ }
+
+ int expectedDataLength() {
+
+ return data.length() + 1 +
+ indexKey1.length() + 1 +
+ indexKey2.length() + 1;
+ }
+
+ int expectedKeyLength() {
+
+ return primaryKey.length() + 1;
+ }
+
+ public void marshalEntry(TupleOutput dataOutput) {
+
+ dataOutput.writeString(data);
+ dataOutput.writeString(indexKey1);
+ dataOutput.writeString(indexKey2);
+ }
+
+ public void unmarshalEntry(TupleInput dataInput) {
+
+ data = dataInput.readString();
+ indexKey1 = dataInput.readString();
+ indexKey2 = dataInput.readString();
+ }
+
+ public void marshalPrimaryKey(TupleOutput keyOutput) {
+
+ keyOutput.writeString(primaryKey);
+ }
+
+ public void unmarshalPrimaryKey(TupleInput keyInput) {
+
+ primaryKey = keyInput.readString();
+ }
+
+ public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
+
+ if ("1".equals(keyName)) {
+ if (indexKey1.length() > 0) {
+ keyOutput.writeString(indexKey1);
+ return true;
+ } else {
+ return false;
+ }
+ } else if ("2".equals(keyName)) {
+            if (indexKey2.length() > 0) {
+ keyOutput.writeString(indexKey2);
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ throw new IllegalArgumentException("Unknown keyName: " + keyName);
+ }
+ }
+
+ public boolean nullifyForeignKey(String keyName) {
+
+ if ("1".equals(keyName)) {
+ if (indexKey1.length() > 0) {
+ indexKey1 = "";
+ return true;
+ } else {
+ return false;
+ }
+ } else if ("2".equals(keyName)) {
+            if (indexKey2.length() > 0) {
+ indexKey2 = "";
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ throw new IllegalArgumentException("Unknown keyName: " + keyName);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/TupleBindingTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/TupleBindingTest.java
new file mode 100644
index 0000000..8f315ff
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/TupleBindingTest.java
@@ -0,0 +1,426 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.bind.tuple.test;
+
+import java.math.BigInteger;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.tuple.BigIntegerBinding;
+import com.sleepycat.bind.tuple.BooleanBinding;
+import com.sleepycat.bind.tuple.ByteBinding;
+import com.sleepycat.bind.tuple.CharacterBinding;
+import com.sleepycat.bind.tuple.DoubleBinding;
+import com.sleepycat.bind.tuple.FloatBinding;
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.bind.tuple.LongBinding;
+import com.sleepycat.bind.tuple.ShortBinding;
+import com.sleepycat.bind.tuple.SortedDoubleBinding;
+import com.sleepycat.bind.tuple.SortedFloatBinding;
+import com.sleepycat.bind.tuple.StringBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleInputBinding;
+import com.sleepycat.bind.tuple.TupleMarshalledBinding;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.bind.tuple.TupleTupleMarshalledBinding;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.util.ExceptionUnwrapper;
+import com.sleepycat.util.FastOutputStream;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * @author Mark Hayes
+ */
+public class TupleBindingTest extends TestCase {
+
+ private DatabaseEntry buffer;
+ private DatabaseEntry keyBuffer;
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite(TupleBindingTest.class);
+ return suite;
+ }
+
+ public TupleBindingTest(String name) {
+
+ super(name);
+ }
+
+ @Override
+ public void setUp() {
+
+ SharedTestUtils.printTestName("TupleBindingTest." + getName());
+ buffer = new DatabaseEntry();
+ keyBuffer = new DatabaseEntry();
+ }
+
+ @Override
+ public void tearDown() {
+
+ /* Ensure that GC can cleanup. */
+ buffer = null;
+ keyBuffer = null;
+ }
+
+ @Override
+ public void runTest()
+ throws Throwable {
+
+ try {
+ super.runTest();
+ } catch (Exception e) {
+ throw ExceptionUnwrapper.unwrap(e);
+ }
+ }
+
+ private void primitiveBindingTest(Class primitiveCls, Class compareCls,
+ Object val, int byteSize) {
+
+ TupleBinding binding = TupleBinding.getPrimitiveBinding(primitiveCls);
+
+ /* Test standard object binding. */
+
+ binding.objectToEntry(val, buffer);
+ assertEquals(byteSize, buffer.getSize());
+
+ Object val2 = binding.entryToObject(buffer);
+ assertSame(compareCls, val2.getClass());
+ assertEquals(val, val2);
+
+ Object valWithWrongCls = (primitiveCls == String.class)
+ ? ((Object) new Integer(0)) : ((Object) new String(""));
+ try {
+ binding.objectToEntry(valWithWrongCls, buffer);
+ }
+ catch (ClassCastException expected) {}
+
+ /* Test nested tuple binding. */
+ forMoreCoverageTest(binding, val);
+ }
+
+ private void forMoreCoverageTest(TupleBinding val1,Object val2) {
+
+ TupleOutput output = new TupleOutput();
+ output.writeString("abc");
+ val1.objectToEntry(val2, output);
+ output.writeString("xyz");
+
+ TupleInput input = new TupleInput(output);
+ assertEquals("abc", input.readString());
+ Object val3 = val1.entryToObject(input);
+ assertEquals("xyz", input.readString());
+
+ assertEquals(0, input.available());
+ assertSame(val2.getClass(), val3.getClass());
+ assertEquals(val2, val3);
+ }
+
+ public void testPrimitiveBindings() {
+
+ primitiveBindingTest(String.class, String.class,
+ "abc", 4);
+
+ primitiveBindingTest(Character.class, Character.class,
+ new Character('a'), 2);
+ primitiveBindingTest(Boolean.class, Boolean.class,
+ new Boolean(true), 1);
+ primitiveBindingTest(Byte.class, Byte.class,
+ new Byte((byte) 123), 1);
+ primitiveBindingTest(Short.class, Short.class,
+ new Short((short) 123), 2);
+ primitiveBindingTest(Integer.class, Integer.class,
+ new Integer(123), 4);
+ primitiveBindingTest(Long.class, Long.class,
+ new Long(123), 8);
+ primitiveBindingTest(Float.class, Float.class,
+ new Float(123.123), 4);
+ primitiveBindingTest(Double.class, Double.class,
+ new Double(123.123), 8);
+
+ primitiveBindingTest(Character.TYPE, Character.class,
+ new Character('a'), 2);
+ primitiveBindingTest(Boolean.TYPE, Boolean.class,
+ new Boolean(true), 1);
+ primitiveBindingTest(Byte.TYPE, Byte.class,
+ new Byte((byte) 123), 1);
+ primitiveBindingTest(Short.TYPE, Short.class,
+ new Short((short) 123), 2);
+ primitiveBindingTest(Integer.TYPE, Integer.class,
+ new Integer(123), 4);
+ primitiveBindingTest(Long.TYPE, Long.class,
+ new Long(123), 8);
+ primitiveBindingTest(Float.TYPE, Float.class,
+ new Float(123.123), 4);
+ primitiveBindingTest(Double.TYPE, Double.class,
+ new Double(123.123), 8);
+
+ DatabaseEntry entry = new DatabaseEntry();
+
+ StringBinding.stringToEntry("abc", entry);
+ assertEquals(4, entry.getData().length);
+ assertEquals("abc", StringBinding.entryToString(entry));
+
+ new StringBinding().objectToEntry("abc", entry);
+ assertEquals(4, entry.getData().length);
+
+ StringBinding.stringToEntry(null, entry);
+ assertEquals(2, entry.getData().length);
+ assertEquals(null, StringBinding.entryToString(entry));
+
+ new StringBinding().objectToEntry(null, entry);
+ assertEquals(2, entry.getData().length);
+
+ CharacterBinding.charToEntry('a', entry);
+ assertEquals(2, entry.getData().length);
+ assertEquals('a', CharacterBinding.entryToChar(entry));
+
+ new CharacterBinding().objectToEntry(new Character('a'), entry);
+ assertEquals(2, entry.getData().length);
+
+ BooleanBinding.booleanToEntry(true, entry);
+ assertEquals(1, entry.getData().length);
+ assertEquals(true, BooleanBinding.entryToBoolean(entry));
+
+ new BooleanBinding().objectToEntry(Boolean.TRUE, entry);
+ assertEquals(1, entry.getData().length);
+
+ ByteBinding.byteToEntry((byte) 123, entry);
+ assertEquals(1, entry.getData().length);
+ assertEquals((byte) 123, ByteBinding.entryToByte(entry));
+
+ ShortBinding.shortToEntry((short) 123, entry);
+ assertEquals(2, entry.getData().length);
+ assertEquals((short) 123, ShortBinding.entryToShort(entry));
+
+ new ByteBinding().objectToEntry(new Byte((byte) 123), entry);
+ assertEquals(1, entry.getData().length);
+
+ IntegerBinding.intToEntry(123, entry);
+ assertEquals(4, entry.getData().length);
+ assertEquals(123, IntegerBinding.entryToInt(entry));
+
+ new IntegerBinding().objectToEntry(new Integer(123), entry);
+ assertEquals(4, entry.getData().length);
+
+ LongBinding.longToEntry(123, entry);
+ assertEquals(8, entry.getData().length);
+ assertEquals(123, LongBinding.entryToLong(entry));
+
+ new LongBinding().objectToEntry(new Long(123), entry);
+ assertEquals(8, entry.getData().length);
+
+ FloatBinding.floatToEntry((float) 123.123, entry);
+ assertEquals(4, entry.getData().length);
+ assertTrue(((float) 123.123) == FloatBinding.entryToFloat(entry));
+
+ new FloatBinding().objectToEntry(new Float((float) 123.123), entry);
+ assertEquals(4, entry.getData().length);
+
+ DoubleBinding.doubleToEntry(123.123, entry);
+ assertEquals(8, entry.getData().length);
+ assertTrue(123.123 == DoubleBinding.entryToDouble(entry));
+
+ new DoubleBinding().objectToEntry(new Double(123.123), entry);
+ assertEquals(8, entry.getData().length);
+
+ BigIntegerBinding.bigIntegerToEntry
+ (new BigInteger("1234567890123456"), entry);
+ assertEquals(9, entry.getData().length);
+ assertTrue((new BigInteger("1234567890123456")).equals
+ (BigIntegerBinding.entryToBigInteger(entry)));
+
+ new BigIntegerBinding().objectToEntry
+ (new BigInteger("1234567890123456"), entry);
+ assertEquals(9, entry.getData().length);
+ forMoreCoverageTest(new BigIntegerBinding(),
+ new BigInteger("1234567890123456"));
+
+ SortedFloatBinding.floatToEntry((float) 123.123, entry);
+ assertEquals(4, entry.getData().length);
+ assertTrue(((float) 123.123) ==
+ SortedFloatBinding.entryToFloat(entry));
+
+ new SortedFloatBinding().objectToEntry
+ (new Float((float) 123.123), entry);
+ assertEquals(4, entry.getData().length);
+ forMoreCoverageTest(new SortedFloatBinding(),
+ new Float((float) 123.123));
+
+ SortedDoubleBinding.doubleToEntry(123.123, entry);
+ assertEquals(8, entry.getData().length);
+ assertTrue(123.123 == SortedDoubleBinding.entryToDouble(entry));
+
+ new SortedDoubleBinding().objectToEntry(new Double(123.123), entry);
+ assertEquals(8, entry.getData().length);
+ forMoreCoverageTest(new SortedDoubleBinding(),
+ new Double(123.123));
+ }
+
+ public void testTupleInputBinding() {
+
+ EntryBinding binding = new TupleInputBinding();
+
+ TupleOutput out = new TupleOutput();
+ out.writeString("abc");
+ binding.objectToEntry(new TupleInput(out), buffer);
+ assertEquals(4, buffer.getSize());
+
+ Object result = binding.entryToObject(buffer);
+ assertTrue(result instanceof TupleInput);
+ TupleInput in = (TupleInput) result;
+ assertEquals("abc", in.readString());
+ assertEquals(0, in.available());
+ }
+
+ // also tests TupleBinding since TupleMarshalledBinding extends it
+ public void testTupleMarshalledBinding() {
+
+ EntryBinding binding =
+ new TupleMarshalledBinding(MarshalledObject.class);
+
+ MarshalledObject val = new MarshalledObject("abc", "", "", "");
+ binding.objectToEntry(val, buffer);
+ assertEquals(val.expectedDataLength(), buffer.getSize());
+
+ Object result = binding.entryToObject(buffer);
+ assertTrue(result instanceof MarshalledObject);
+ val = (MarshalledObject) result;
+ assertEquals("abc", val.getData());
+ }
+
+ // also tests TupleTupleBinding since TupleTupleMarshalledBinding extends
+ // it
+ public void testTupleTupleMarshalledBinding() {
+
+ EntityBinding binding =
+ new TupleTupleMarshalledBinding(MarshalledObject.class);
+
+ MarshalledObject val = new MarshalledObject("abc", "primary",
+ "index1", "index2");
+ binding.objectToData(val, buffer);
+ assertEquals(val.expectedDataLength(), buffer.getSize());
+ binding.objectToKey(val, keyBuffer);
+ assertEquals(val.expectedKeyLength(), keyBuffer.getSize());
+
+ Object result = binding.entryToObject(keyBuffer, buffer);
+ assertTrue(result instanceof MarshalledObject);
+ val = (MarshalledObject) result;
+ assertEquals("abc", val.getData());
+ assertEquals("primary", val.getPrimaryKey());
+ assertEquals("index1", val.getIndexKey1());
+ assertEquals("index2", val.getIndexKey2());
+ }
+
+ public void testBufferSize() {
+
+ CaptureSizeBinding binding = new CaptureSizeBinding();
+
+ binding.objectToEntry("x", buffer);
+ assertEquals("x", binding.entryToObject(buffer));
+ assertEquals(FastOutputStream.DEFAULT_INIT_SIZE, binding.bufSize);
+
+ binding.setTupleBufferSize(1000);
+ binding.objectToEntry("x", buffer);
+ assertEquals("x", binding.entryToObject(buffer));
+ assertEquals(1000, binding.bufSize);
+ }
+
+ private class CaptureSizeBinding extends TupleBinding {
+
+ int bufSize;
+
+ CaptureSizeBinding() {
+ super();
+ }
+
+ @Override
+ public TupleOutput getTupleOutput(Object object) {
+ TupleOutput out = super.getTupleOutput(object);
+ bufSize = out.getBufferBytes().length;
+ return out;
+ }
+
+ @Override
+ public Object entryToObject(TupleInput input) {
+ return input.readString();
+ }
+
+ @Override
+ public void objectToEntry(Object object, TupleOutput output) {
+ assertEquals(bufSize, output.getBufferBytes().length);
+ output.writeString((String) object);
+ }
+ }
+
+ public void testBufferOverride() {
+
+ TupleOutput out = new TupleOutput(new byte[10]);
+ CachedOutputBinding binding = new CachedOutputBinding(out);
+
+ binding.used = false;
+ binding.objectToEntry("x", buffer);
+ assertEquals("x", binding.entryToObject(buffer));
+ assertTrue(binding.used);
+
+ binding.used = false;
+ binding.objectToEntry("aaaaaaaaaaaaaaaaaaaaaa", buffer);
+ assertEquals("aaaaaaaaaaaaaaaaaaaaaa", binding.entryToObject(buffer));
+ assertTrue(binding.used);
+
+ binding.used = false;
+ binding.objectToEntry("x", buffer);
+ assertEquals("x", binding.entryToObject(buffer));
+ assertTrue(binding.used);
+ }
+
+ private class CachedOutputBinding extends TupleBinding {
+
+ TupleOutput out;
+ boolean used;
+
+ CachedOutputBinding(TupleOutput out) {
+ super();
+ this.out = out;
+ }
+
+ @Override
+ public TupleOutput getTupleOutput(Object object) {
+ out.reset();
+ used = true;
+ return out;
+ }
+
+ @Override
+ public Object entryToObject(TupleInput input) {
+ return input.readString();
+ }
+
+ @Override
+ public void objectToEntry(Object object, TupleOutput output) {
+ assertSame(out, output);
+ output.writeString((String) object);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/TupleFormatTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/TupleFormatTest.java
new file mode 100644
index 0000000..4c2ece6
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/TupleFormatTest.java
@@ -0,0 +1,927 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.bind.tuple.test;
+
+import java.util.Arrays;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * @author Mark Hayes
+ */
+public class TupleFormatTest extends TestCase {
+
+ private TupleInput in;
+ private TupleOutput out;
+ private DatabaseEntry buffer;
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite(TupleFormatTest.class);
+ return suite;
+ }
+
+ public TupleFormatTest(String name) {
+
+ super(name);
+ }
+
+ @Override
+ public void setUp() {
+
+ SharedTestUtils.printTestName("TupleFormatTest." + getName());
+ buffer = new DatabaseEntry();
+ out = new TupleOutput();
+ }
+
+ @Override
+ public void tearDown() {
+
+ /* Ensure that GC can cleanup. */
+ in = null;
+ out = null;
+ buffer = null;
+ }
+
+ private void copyOutputToInput() {
+
+ TupleBinding.outputToEntry(out, buffer);
+ assertEquals(out.size(), buffer.getSize());
+ in = TupleBinding.entryToInput(buffer);
+ assertEquals(in.available(), buffer.getSize());
+ assertEquals(in.getBufferLength(), buffer.getSize());
+ }
+
+ private void stringTest(String val) {
+
+ out.reset();
+ out.writeString(val);
+ assertEquals(val.length() + 1, out.size()); // assume 1-byte chars
+ copyOutputToInput();
+ assertEquals(val, in.readString());
+ assertEquals(0, in.available());
+ }
+
+ public void testString() {
+
+ stringTest("");
+ stringTest("a");
+ stringTest("abc");
+
+ out.reset();
+ out.writeString("abc");
+ out.writeString("defg");
+ assertEquals(9, out.size());
+ copyOutputToInput();
+ assertEquals("abc", in.readString());
+ assertEquals("defg", in.readString());
+ assertEquals(0, in.available());
+
+ out.reset();
+ out.writeString("abc");
+ out.writeString("defg");
+ out.writeString("hijkl");
+ assertEquals(15, out.size());
+ copyOutputToInput();
+ assertEquals("abc", in.readString());
+ assertEquals("defg", in.readString());
+ assertEquals("hijkl", in.readString());
+ assertEquals(0, in.available());
+ }
+
+ private void fixedStringTest(char[] val) {
+
+ out.reset();
+ out.writeString(val);
+ assertEquals(val.length, out.size()); // assume 1 byte chars
+ copyOutputToInput();
+ char[] val2 = new char[val.length];
+ in.readString(val2);
+ assertTrue(Arrays.equals(val, val2));
+ assertEquals(0, in.available());
+ in.reset();
+ String val3 = in.readString(val.length);
+ assertTrue(Arrays.equals(val, val3.toCharArray()));
+ assertEquals(0, in.available());
+ }
+
+ public void testFixedString() {
+
+ fixedStringTest(new char[0]);
+ fixedStringTest(new char[] {'a'});
+ fixedStringTest(new char[] {'a', 'b', 'c'});
+
+ out.reset();
+ out.writeString(new char[] {'a', 'b', 'c'});
+ out.writeString(new char[] {'d', 'e', 'f', 'g'});
+ assertEquals(7, out.size());
+ copyOutputToInput();
+ assertEquals("abc", in.readString(3));
+ assertEquals("defg", in.readString(4));
+ assertEquals(0, in.available());
+
+ out.reset();
+ out.writeString(new char[] {'a', 'b', 'c'});
+ out.writeString(new char[] {'d', 'e', 'f', 'g'});
+ out.writeString(new char[] {'h', 'i', 'j', 'k', 'l'});
+ assertEquals(12, out.size());
+ copyOutputToInput();
+ assertEquals("abc", in.readString(3));
+ assertEquals("defg", in.readString(4));
+ assertEquals("hijkl", in.readString(5));
+ assertEquals(0, in.available());
+ }
+
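+    /*
+     * A null string still occupies space in the tuple: the size assertions
+     * below show it is encoded in two bytes, whereas the empty string in
+     * stringTest("") above takes a single byte.
+     */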
+ public void testNullString() {
+
+ out.reset();
+ out.writeString((String) null);
+ assertEquals(2, out.size());
+ copyOutputToInput();
+ assertEquals(null, in.readString());
+ assertEquals(0, in.available());
+
+ out.reset();
+ out.writeString((String) null);
+ out.writeString("x");
+ assertEquals(4, out.size());
+ copyOutputToInput();
+ assertEquals(null, in.readString());
+ assertEquals(2, in.available());
+ assertEquals("x", in.readString());
+ assertEquals(0, in.available());
+
+ out.reset();
+ out.writeString("x");
+ out.writeString((String) null);
+ assertEquals(4, out.size());
+ copyOutputToInput();
+ assertEquals("x", in.readString());
+ assertEquals(2, in.available());
+ assertEquals(null, in.readString());
+ assertEquals(0, in.available());
+
+ out.reset();
+ out.writeString((String) null);
+ out.writeInt(123);
+ assertEquals(6, out.size());
+ copyOutputToInput();
+ assertEquals(null, in.readString());
+ assertEquals(4, in.available());
+ assertEquals(123, in.readInt());
+ assertEquals(0, in.available());
+
+ out.reset();
+ out.writeInt(123);
+ out.writeString((String) null);
+ assertEquals(6, out.size());
+ copyOutputToInput();
+ assertEquals(123, in.readInt());
+ assertEquals(2, in.available());
+ assertEquals(null, in.readString());
+ assertEquals(0, in.available());
+ }
+
+ private void charsTest(char[] val) {
+
+ for (int mode = 0; mode < 2; mode += 1) {
+ out.reset();
+ switch (mode) {
+ case 0: out.writeChars(val); break;
+ case 1: out.writeChars(new String(val)); break;
+ default: throw new IllegalStateException();
+ }
+ assertEquals(val.length * 2, out.size());
+ copyOutputToInput();
+ char[] val2 = new char[val.length];
+ in.readChars(val2);
+ assertTrue(Arrays.equals(val, val2));
+ assertEquals(0, in.available());
+ in.reset();
+ String val3 = in.readChars(val.length);
+ assertTrue(Arrays.equals(val, val3.toCharArray()));
+ assertEquals(0, in.available());
+ }
+ }
+
+ public void testChars() {
+
+ charsTest(new char[0]);
+ charsTest(new char[] {'a'});
+ charsTest(new char[] {'a', 'b', 'c'});
+
+ out.reset();
+ out.writeChars("abc");
+ out.writeChars("defg");
+ assertEquals(7 * 2, out.size());
+ copyOutputToInput();
+ assertEquals("abc", in.readChars(3));
+ assertEquals("defg", in.readChars(4));
+ assertEquals(0, in.available());
+
+ out.reset();
+ out.writeChars("abc");
+ out.writeChars("defg");
+ out.writeChars("hijkl");
+ assertEquals(12 * 2, out.size());
+ copyOutputToInput();
+ assertEquals("abc", in.readChars(3));
+ assertEquals("defg", in.readChars(4));
+ assertEquals("hijkl", in.readChars(5));
+ assertEquals(0, in.available());
+ }
+
+ private void bytesTest(char[] val) {
+
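+        /*
+         * The test expects writeBytes to keep only the low-order byte of
+         * each char, so the expected result masks every input value with
+         * 0xFF.
+         */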
+ char[] valBytes = new char[val.length];
+ for (int i = 0; i < val.length; i += 1)
+ valBytes[i] = (char) (val[i] & 0xFF);
+
+ for (int mode = 0; mode < 2; mode += 1) {
+ out.reset();
+ switch (mode) {
+ case 0: out.writeBytes(val); break;
+ case 1: out.writeBytes(new String(val)); break;
+ default: throw new IllegalStateException();
+ }
+ assertEquals(val.length, out.size());
+ copyOutputToInput();
+ char[] val2 = new char[val.length];
+ in.readBytes(val2);
+ assertTrue(Arrays.equals(valBytes, val2));
+ assertEquals(0, in.available());
+ in.reset();
+ String val3 = in.readBytes(val.length);
+ assertTrue(Arrays.equals(valBytes, val3.toCharArray()));
+ assertEquals(0, in.available());
+ }
+ }
+
+ public void testBytes() {
+
+ bytesTest(new char[0]);
+ bytesTest(new char[] {'a'});
+ bytesTest(new char[] {'a', 'b', 'c'});
+ bytesTest(new char[] {0x7F00, 0x7FFF, 0xFF00, 0xFFFF});
+
+ out.reset();
+ out.writeBytes("abc");
+ out.writeBytes("defg");
+ assertEquals(7, out.size());
+ copyOutputToInput();
+ assertEquals("abc", in.readBytes(3));
+ assertEquals("defg", in.readBytes(4));
+ assertEquals(0, in.available());
+
+ out.reset();
+ out.writeBytes("abc");
+ out.writeBytes("defg");
+ out.writeBytes("hijkl");
+ assertEquals(12, out.size());
+ copyOutputToInput();
+ assertEquals("abc", in.readBytes(3));
+ assertEquals("defg", in.readBytes(4));
+ assertEquals("hijkl", in.readBytes(5));
+ assertEquals(0, in.available());
+ }
+
+ private void booleanTest(boolean val) {
+
+ out.reset();
+ out.writeBoolean(val);
+ assertEquals(1, out.size());
+ copyOutputToInput();
+ assertEquals(val, in.readBoolean());
+ assertEquals(0, in.available());
+ }
+
+ public void testBoolean() {
+
+ booleanTest(true);
+ booleanTest(false);
+
+ out.reset();
+ out.writeBoolean(true);
+ out.writeBoolean(false);
+ assertEquals(2, out.size());
+ copyOutputToInput();
+ assertEquals(true, in.readBoolean());
+ assertEquals(false, in.readBoolean());
+ assertEquals(0, in.available());
+
+ out.reset();
+ out.writeBoolean(true);
+ out.writeBoolean(false);
+ out.writeBoolean(true);
+ assertEquals(3, out.size());
+ copyOutputToInput();
+ assertEquals(true, in.readBoolean());
+ assertEquals(false, in.readBoolean());
+ assertEquals(true, in.readBoolean());
+ assertEquals(0, in.available());
+ }
+
+ private void unsignedByteTest(int val) {
+
+ unsignedByteTest(val, val);
+ }
+
+ private void unsignedByteTest(int val, int expected) {
+
+ out.reset();
+ out.writeUnsignedByte(val);
+ assertEquals(1, out.size());
+ copyOutputToInput();
+ assertEquals(expected, in.readUnsignedByte());
+ }
+
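+    /*
+     * Values outside 0..255 are expected to wrap to their low-order byte:
+     * the (value, expected) pairs below check that 256 reads back as 0 and
+     * -1 reads back as 255.
+     */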
+ public void testUnsignedByte() {
+
+ unsignedByteTest(0);
+ unsignedByteTest(1);
+ unsignedByteTest(254);
+ unsignedByteTest(255);
+ unsignedByteTest(256, 0);
+ unsignedByteTest(-1, 255);
+ unsignedByteTest(-2, 254);
+ unsignedByteTest(-255, 1);
+
+ out.reset();
+ out.writeUnsignedByte(0);
+ out.writeUnsignedByte(1);
+ out.writeUnsignedByte(255);
+ assertEquals(3, out.size());
+ copyOutputToInput();
+ assertEquals(0, in.readUnsignedByte());
+ assertEquals(1, in.readUnsignedByte());
+ assertEquals(255, in.readUnsignedByte());
+ assertEquals(0, in.available());
+ }
+
+ private void unsignedShortTest(int val) {
+
+ unsignedShortTest(val, val);
+ }
+
+ private void unsignedShortTest(int val, int expected) {
+
+ out.reset();
+ out.writeUnsignedShort(val);
+ assertEquals(2, out.size());
+ copyOutputToInput();
+ assertEquals(expected, in.readUnsignedShort());
+ }
+
+ public void testUnsignedShort() {
+
+ unsignedShortTest(0);
+ unsignedShortTest(1);
+ unsignedShortTest(255);
+ unsignedShortTest(256);
+ unsignedShortTest(257);
+ unsignedShortTest(Short.MAX_VALUE - 1);
+ unsignedShortTest(Short.MAX_VALUE);
+ unsignedShortTest(Short.MAX_VALUE + 1);
+ unsignedShortTest(0xFFFF - 1);
+ unsignedShortTest(0xFFFF);
+ unsignedShortTest(0xFFFF + 1, 0);
+ unsignedShortTest(0x7FFF0000, 0);
+ unsignedShortTest(0xFFFF0000, 0);
+ unsignedShortTest(-1, 0xFFFF);
+ unsignedShortTest(-2, 0xFFFF - 1);
+ unsignedShortTest(-0xFFFF, 1);
+
+ out.reset();
+ out.writeUnsignedShort(0);
+ out.writeUnsignedShort(1);
+ out.writeUnsignedShort(0xFFFF);
+ assertEquals(6, out.size());
+ copyOutputToInput();
+ assertEquals(0, in.readUnsignedShort());
+ assertEquals(1, in.readUnsignedShort());
+ assertEquals(0xFFFF, in.readUnsignedShort());
+ assertEquals(0, in.available());
+ }
+
+ private void unsignedIntTest(long val) {
+
+ unsignedIntTest(val, val);
+ }
+
+ private void unsignedIntTest(long val, long expected) {
+
+ out.reset();
+ out.writeUnsignedInt(val);
+ assertEquals(4, out.size());
+ copyOutputToInput();
+ assertEquals(expected, in.readUnsignedInt());
+ }
+
+ public void testUnsignedInt() {
+
+ unsignedIntTest(0L);
+ unsignedIntTest(1L);
+ unsignedIntTest(255L);
+ unsignedIntTest(256L);
+ unsignedIntTest(257L);
+ unsignedIntTest(Short.MAX_VALUE - 1L);
+ unsignedIntTest(Short.MAX_VALUE);
+ unsignedIntTest(Short.MAX_VALUE + 1L);
+ unsignedIntTest(Integer.MAX_VALUE - 1L);
+ unsignedIntTest(Integer.MAX_VALUE);
+ unsignedIntTest(Integer.MAX_VALUE + 1L);
+ unsignedIntTest(0xFFFFFFFFL - 1L);
+ unsignedIntTest(0xFFFFFFFFL);
+ unsignedIntTest(0xFFFFFFFFL + 1L, 0L);
+ unsignedIntTest(0x7FFFFFFF00000000L, 0L);
+ unsignedIntTest(0xFFFFFFFF00000000L, 0L);
+ unsignedIntTest(-1, 0xFFFFFFFFL);
+ unsignedIntTest(-2, 0xFFFFFFFFL - 1L);
+ unsignedIntTest(-0xFFFFFFFFL, 1L);
+
+ out.reset();
+ out.writeUnsignedInt(0L);
+ out.writeUnsignedInt(1L);
+ out.writeUnsignedInt(0xFFFFFFFFL);
+ assertEquals(12, out.size());
+ copyOutputToInput();
+ assertEquals(0L, in.readUnsignedInt());
+ assertEquals(1L, in.readUnsignedInt());
+ assertEquals(0xFFFFFFFFL, in.readUnsignedInt());
+ assertEquals(0L, in.available());
+ }
+
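+    /*
+     * readByte is compared against (byte) val, so out-of-range ints are
+     * expected to truncate to their low-order 8 bits on the round trip.
+     */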
+ private void byteTest(int val) {
+
+ out.reset();
+ out.writeByte(val);
+ assertEquals(1, out.size());
+ copyOutputToInput();
+ assertEquals((byte) val, in.readByte());
+ }
+
+ public void testByte() {
+
+ byteTest(0);
+ byteTest(1);
+ byteTest(-1);
+ byteTest(Byte.MAX_VALUE - 1);
+ byteTest(Byte.MAX_VALUE);
+ byteTest(Byte.MAX_VALUE + 1);
+ byteTest(Byte.MIN_VALUE + 1);
+ byteTest(Byte.MIN_VALUE);
+ byteTest(Byte.MIN_VALUE - 1);
+ byteTest(0x7F);
+ byteTest(0xFF);
+ byteTest(0x7FFF);
+ byteTest(0xFFFF);
+ byteTest(0x7FFFFFFF);
+ byteTest(0xFFFFFFFF);
+
+ out.reset();
+ out.writeByte(0);
+ out.writeByte(1);
+ out.writeByte(-1);
+ assertEquals(3, out.size());
+ copyOutputToInput();
+ assertEquals(0, in.readByte());
+ assertEquals(1, in.readByte());
+ assertEquals(-1, in.readByte());
+ assertEquals(0, in.available());
+ }
+
+ private void shortTest(int val) {
+
+ out.reset();
+ out.writeShort(val);
+ assertEquals(2, out.size());
+ copyOutputToInput();
+ assertEquals((short) val, in.readShort());
+ }
+
+ public void testShort() {
+
+ shortTest(0);
+ shortTest(1);
+ shortTest(-1);
+ shortTest(Short.MAX_VALUE - 1);
+ shortTest(Short.MAX_VALUE);
+ shortTest(Short.MAX_VALUE + 1);
+ shortTest(Short.MIN_VALUE + 1);
+ shortTest(Short.MIN_VALUE);
+ shortTest(Short.MIN_VALUE - 1);
+ shortTest(0x7F);
+ shortTest(0xFF);
+ shortTest(0x7FFF);
+ shortTest(0xFFFF);
+ shortTest(0x7FFFFFFF);
+ shortTest(0xFFFFFFFF);
+
+ out.reset();
+ out.writeShort(0);
+ out.writeShort(1);
+ out.writeShort(-1);
+ assertEquals(3 * 2, out.size());
+ copyOutputToInput();
+ assertEquals(0, in.readShort());
+ assertEquals(1, in.readShort());
+ assertEquals(-1, in.readShort());
+ assertEquals(0, in.available());
+ }
+
+ private void intTest(int val) {
+
+ out.reset();
+ out.writeInt(val);
+ assertEquals(4, out.size());
+ copyOutputToInput();
+ assertEquals(val, in.readInt());
+ }
+
+ public void testInt() {
+
+ intTest(0);
+ intTest(1);
+ intTest(-1);
+ intTest(Integer.MAX_VALUE - 1);
+ intTest(Integer.MAX_VALUE);
+ intTest(Integer.MAX_VALUE + 1);
+ intTest(Integer.MIN_VALUE + 1);
+ intTest(Integer.MIN_VALUE);
+ intTest(Integer.MIN_VALUE - 1);
+ intTest(0x7F);
+ intTest(0xFF);
+ intTest(0x7FFF);
+ intTest(0xFFFF);
+ intTest(0x7FFFFFFF);
+ intTest(0xFFFFFFFF);
+
+ out.reset();
+ out.writeInt(0);
+ out.writeInt(1);
+ out.writeInt(-1);
+ assertEquals(3 * 4, out.size());
+ copyOutputToInput();
+ assertEquals(0, in.readInt());
+ assertEquals(1, in.readInt());
+ assertEquals(-1, in.readInt());
+ assertEquals(0, in.available());
+ }
+
+ private void longTest(long val) {
+
+ out.reset();
+ out.writeLong(val);
+ assertEquals(8, out.size());
+ copyOutputToInput();
+ assertEquals(val, in.readLong());
+ }
+
+ public void testLong() {
+
+ longTest(0);
+ longTest(1);
+ longTest(-1);
+ longTest(Long.MAX_VALUE - 1);
+ longTest(Long.MAX_VALUE);
+ longTest(Long.MAX_VALUE + 1);
+ longTest(Long.MIN_VALUE + 1);
+ longTest(Long.MIN_VALUE);
+ longTest(Long.MIN_VALUE - 1);
+ longTest(0x7F);
+ longTest(0xFF);
+ longTest(0x7FFF);
+ longTest(0xFFFF);
+ longTest(0x7FFFFFFF);
+ longTest(0xFFFFFFFF);
+ longTest(0x7FFFFFFFFFFFFFFFL);
+ longTest(0xFFFFFFFFFFFFFFFFL);
+
+ out.reset();
+ out.writeLong(0);
+ out.writeLong(1);
+ out.writeLong(-1);
+ assertEquals(3 * 8, out.size());
+ copyOutputToInput();
+ assertEquals(0, in.readLong());
+ assertEquals(1, in.readLong());
+ assertEquals(-1, in.readLong());
+ assertEquals(0, in.available());
+ }
+
+ private void floatTest(double val) {
+
+ out.reset();
+ out.writeFloat((float) val);
+ assertEquals(4, out.size());
+ copyOutputToInput();
+ if (Double.isNaN(val)) {
+ assertTrue(Float.isNaN(in.readFloat()));
+ } else {
+ assertEquals((float) val, in.readFloat(), 0);
+ }
+ }
+
+ public void testFloat() {
+
+ floatTest(0);
+ floatTest(1);
+ floatTest(-1);
+ floatTest(1.0);
+ floatTest(0.1);
+ floatTest(-1.0);
+ floatTest(-0.1);
+ floatTest(Float.NaN);
+ floatTest(Float.NEGATIVE_INFINITY);
+ floatTest(Float.POSITIVE_INFINITY);
+ floatTest(Short.MAX_VALUE);
+ floatTest(Short.MIN_VALUE);
+ floatTest(Integer.MAX_VALUE);
+ floatTest(Integer.MIN_VALUE);
+ floatTest(Long.MAX_VALUE);
+ floatTest(Long.MIN_VALUE);
+ floatTest(Float.MAX_VALUE);
+ floatTest(Float.MAX_VALUE + 1);
+ floatTest(Float.MIN_VALUE + 1);
+ floatTest(Float.MIN_VALUE);
+ floatTest(Float.MIN_VALUE - 1);
+ floatTest(0x7F);
+ floatTest(0xFF);
+ floatTest(0x7FFF);
+ floatTest(0xFFFF);
+ floatTest(0x7FFFFFFF);
+ floatTest(0xFFFFFFFF);
+ floatTest(0x7FFFFFFFFFFFFFFFL);
+ floatTest(0xFFFFFFFFFFFFFFFFL);
+
+ out.reset();
+ out.writeFloat(0);
+ out.writeFloat(1);
+ out.writeFloat(-1);
+ assertEquals(3 * 4, out.size());
+ copyOutputToInput();
+ assertEquals(0, in.readFloat(), 0);
+ assertEquals(1, in.readFloat(), 0);
+ assertEquals(-1, in.readFloat(), 0);
+ assertEquals(0, in.available(), 0);
+ }
+
+ private void doubleTest(double val) {
+
+ out.reset();
+ out.writeDouble(val);
+ assertEquals(8, out.size());
+ copyOutputToInput();
+ if (Double.isNaN(val)) {
+ assertTrue(Double.isNaN(in.readDouble()));
+ } else {
+ assertEquals(val, in.readDouble(), 0);
+ }
+ }
+
+ public void testDouble() {
+
+ doubleTest(0);
+ doubleTest(1);
+ doubleTest(-1);
+ doubleTest(1.0);
+ doubleTest(0.1);
+ doubleTest(-1.0);
+ doubleTest(-0.1);
+ doubleTest(Double.NaN);
+ doubleTest(Double.NEGATIVE_INFINITY);
+ doubleTest(Double.POSITIVE_INFINITY);
+ doubleTest(Short.MAX_VALUE);
+ doubleTest(Short.MIN_VALUE);
+ doubleTest(Integer.MAX_VALUE);
+ doubleTest(Integer.MIN_VALUE);
+ doubleTest(Long.MAX_VALUE);
+ doubleTest(Long.MIN_VALUE);
+ doubleTest(Float.MAX_VALUE);
+ doubleTest(Float.MIN_VALUE);
+ doubleTest(Double.MAX_VALUE - 1);
+ doubleTest(Double.MAX_VALUE);
+ doubleTest(Double.MAX_VALUE + 1);
+ doubleTest(Double.MIN_VALUE + 1);
+ doubleTest(Double.MIN_VALUE);
+ doubleTest(Double.MIN_VALUE - 1);
+ doubleTest(0x7F);
+ doubleTest(0xFF);
+ doubleTest(0x7FFF);
+ doubleTest(0xFFFF);
+ doubleTest(0x7FFFFFFF);
+ doubleTest(0xFFFFFFFF);
+ doubleTest(0x7FFFFFFFFFFFFFFFL);
+ doubleTest(0xFFFFFFFFFFFFFFFFL);
+
+ out.reset();
+ out.writeDouble(0);
+ out.writeDouble(1);
+ out.writeDouble(-1);
+ assertEquals(3 * 8, out.size());
+ copyOutputToInput();
+ assertEquals(0, in.readDouble(), 0);
+ assertEquals(1, in.readDouble(), 0);
+ assertEquals(-1, in.readDouble(), 0);
+ assertEquals(0, in.available(), 0);
+ }
+
+ private void sortedFloatTest(double val) {
+
+ out.reset();
+ out.writeSortedFloat((float) val);
+ assertEquals(4, out.size());
+ copyOutputToInput();
+ if (Double.isNaN(val)) {
+ assertTrue(Float.isNaN(in.readSortedFloat()));
+ } else {
+ assertEquals((float) val, in.readSortedFloat(), 0);
+ }
+ }
+
+ public void testSortedFloat() {
+
+ sortedFloatTest(0);
+ sortedFloatTest(1);
+ sortedFloatTest(-1);
+ sortedFloatTest(1.0);
+ sortedFloatTest(0.1);
+ sortedFloatTest(-1.0);
+ sortedFloatTest(-0.1);
+ sortedFloatTest(Float.NaN);
+ sortedFloatTest(Float.NEGATIVE_INFINITY);
+ sortedFloatTest(Float.POSITIVE_INFINITY);
+ sortedFloatTest(Short.MAX_VALUE);
+ sortedFloatTest(Short.MIN_VALUE);
+ sortedFloatTest(Integer.MAX_VALUE);
+ sortedFloatTest(Integer.MIN_VALUE);
+ sortedFloatTest(Long.MAX_VALUE);
+ sortedFloatTest(Long.MIN_VALUE);
+ sortedFloatTest(Float.MAX_VALUE);
+ sortedFloatTest(Float.MAX_VALUE + 1);
+ sortedFloatTest(Float.MIN_VALUE + 1);
+ sortedFloatTest(Float.MIN_VALUE);
+ sortedFloatTest(Float.MIN_VALUE - 1);
+ sortedFloatTest(0x7F);
+ sortedFloatTest(0xFF);
+ sortedFloatTest(0x7FFF);
+ sortedFloatTest(0xFFFF);
+ sortedFloatTest(0x7FFFFFFF);
+ sortedFloatTest(0xFFFFFFFF);
+ sortedFloatTest(0x7FFFFFFFFFFFFFFFL);
+ sortedFloatTest(0xFFFFFFFFFFFFFFFFL);
+
+ out.reset();
+ out.writeSortedFloat(0);
+ out.writeSortedFloat(1);
+ out.writeSortedFloat(-1);
+ assertEquals(3 * 4, out.size());
+ copyOutputToInput();
+ assertEquals(0, in.readSortedFloat(), 0);
+ assertEquals(1, in.readSortedFloat(), 0);
+ assertEquals(-1, in.readSortedFloat(), 0);
+ assertEquals(0, in.available(), 0);
+ }
+
+ private void sortedDoubleTest(double val) {
+
+ out.reset();
+ out.writeSortedDouble(val);
+ assertEquals(8, out.size());
+ copyOutputToInput();
+ if (Double.isNaN(val)) {
+ assertTrue(Double.isNaN(in.readSortedDouble()));
+ } else {
+ assertEquals(val, in.readSortedDouble(), 0);
+ }
+ }
+
+ public void testSortedDouble() {
+
+ sortedDoubleTest(0);
+ sortedDoubleTest(1);
+ sortedDoubleTest(-1);
+ sortedDoubleTest(1.0);
+ sortedDoubleTest(0.1);
+ sortedDoubleTest(-1.0);
+ sortedDoubleTest(-0.1);
+ sortedDoubleTest(Double.NaN);
+ sortedDoubleTest(Double.NEGATIVE_INFINITY);
+ sortedDoubleTest(Double.POSITIVE_INFINITY);
+ sortedDoubleTest(Short.MAX_VALUE);
+ sortedDoubleTest(Short.MIN_VALUE);
+ sortedDoubleTest(Integer.MAX_VALUE);
+ sortedDoubleTest(Integer.MIN_VALUE);
+ sortedDoubleTest(Long.MAX_VALUE);
+ sortedDoubleTest(Long.MIN_VALUE);
+ sortedDoubleTest(Float.MAX_VALUE);
+ sortedDoubleTest(Float.MIN_VALUE);
+ sortedDoubleTest(Double.MAX_VALUE - 1);
+ sortedDoubleTest(Double.MAX_VALUE);
+ sortedDoubleTest(Double.MAX_VALUE + 1);
+ sortedDoubleTest(Double.MIN_VALUE + 1);
+ sortedDoubleTest(Double.MIN_VALUE);
+ sortedDoubleTest(Double.MIN_VALUE - 1);
+ sortedDoubleTest(0x7F);
+ sortedDoubleTest(0xFF);
+ sortedDoubleTest(0x7FFF);
+ sortedDoubleTest(0xFFFF);
+ sortedDoubleTest(0x7FFFFFFF);
+ sortedDoubleTest(0xFFFFFFFF);
+ sortedDoubleTest(0x7FFFFFFFFFFFFFFFL);
+ sortedDoubleTest(0xFFFFFFFFFFFFFFFFL);
+
+ out.reset();
+ out.writeSortedDouble(0);
+ out.writeSortedDouble(1);
+ out.writeSortedDouble(-1);
+ assertEquals(3 * 8, out.size());
+ copyOutputToInput();
+ assertEquals(0, in.readSortedDouble(), 0);
+ assertEquals(1, in.readSortedDouble(), 0);
+ assertEquals(-1, in.readSortedDouble(), 0);
+ assertEquals(0, in.available(), 0);
+ }
+
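+    /*
+     * The size argument is the expected encoded length of the packed
+     * (variable-length) value: 119 should fit in one byte, 0xFFFF + 119 in
+     * three, and Integer.MAX_VALUE in five.
+     */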
+ private void packedIntTest(int val, int size) {
+
+ out.reset();
+ out.writePackedInt(val);
+ assertEquals(size, out.size());
+ copyOutputToInput();
+ assertEquals(size, in.getPackedIntByteLength());
+ assertEquals(val, in.readPackedInt());
+ }
+
+ public void testPackedInt() {
+
+ /* Exhaustive value testing is in PackedIntTest. */
+ packedIntTest(119, 1);
+ packedIntTest(0xFFFF + 119, 3);
+ packedIntTest(Integer.MAX_VALUE, 5);
+
+ out.reset();
+ out.writePackedInt(119);
+ out.writePackedInt(0xFFFF + 119);
+ out.writePackedInt(Integer.MAX_VALUE);
+ assertEquals(1 + 3 + 5, out.size());
+ copyOutputToInput();
+ assertEquals(119, in.readPackedInt(), 0);
+ assertEquals(0xFFFF + 119, in.readPackedInt(), 0);
+ assertEquals(Integer.MAX_VALUE, in.readPackedInt(), 0);
+ assertEquals(0, in.available(), 0);
+ }
+
+ private void packedLongTest(long val, int size) {
+
+ out.reset();
+ out.writePackedLong(val);
+ assertEquals(size, out.size());
+ copyOutputToInput();
+ assertEquals(size, in.getPackedLongByteLength());
+ assertEquals(val, in.readPackedLong());
+ }
+
+ public void testPackedLong() {
+
+ /* Exhaustive value testing is in PackedIntTest. */
+ packedLongTest(119, 1);
+ packedLongTest(0xFFFFFFFFL + 119, 5);
+ packedLongTest(Long.MAX_VALUE, 9);
+
+ out.reset();
+ out.writePackedLong(119);
+ out.writePackedLong(0xFFFFFFFFL + 119);
+ out.writePackedLong(Long.MAX_VALUE);
+ assertEquals(1 + 5 + 9, out.size());
+ copyOutputToInput();
+ assertEquals(119, in.readPackedLong(), 0);
+ assertEquals(0xFFFFFFFFL + 119, in.readPackedLong(), 0);
+ assertEquals(Long.MAX_VALUE, in.readPackedLong(), 0);
+ assertEquals(0, in.available(), 0);
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/TupleOrderingTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/TupleOrderingTest.java
new file mode 100644
index 0000000..4a70246
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/bind/tuple/test/TupleOrderingTest.java
@@ -0,0 +1,477 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.bind.tuple.test;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * @author Mark Hayes
+ */
+public class TupleOrderingTest extends TestCase {
+
+ private TupleOutput out;
+ private byte[] prevBuf;
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite(TupleOrderingTest.class);
+ return suite;
+ }
+
+ public TupleOrderingTest(String name) {
+
+ super(name);
+ }
+
+ @Override
+ public void setUp() {
+
+ SharedTestUtils.printTestName("TupleOrderingTest." + getName());
+ out = new TupleOutput();
+ prevBuf = null;
+ }
+
+ @Override
+ public void tearDown() {
+
+        /* Ensure that GC can clean up. */
+ out = null;
+ prevBuf = null;
+ }
+
+ /**
+     * Each tuple written must be strictly greater than (by unsigned
+     * comparison of bytes) the tuple written just before it.  The check()
+     * method compares the bytes just written to those written before the
+     * previous call to check().
+ */
+ private void check() {
+
+ check(-1);
+ }
+
+ private void check(int dataIndex) {
+
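+        /*
+         * Compare the bytes just written (buf) against the previous tuple
+         * (prevBuf) as unsigned bytes.  The new tuple must sort strictly
+         * higher; equality or a decrease fails the test with a hex dump of
+         * both buffers.
+         */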
+ byte[] buf = new byte[out.size()];
+ System.arraycopy(out.getBufferBytes(), out.getBufferOffset(),
+ buf, 0, buf.length);
+ if (prevBuf != null) {
+ int errOffset = -1;
+ int len = Math.min(prevBuf.length, buf.length);
+ boolean areEqual = true;
+ for (int i = 0; i < len; i += 1) {
+ int val1 = prevBuf[i] & 0xFF;
+ int val2 = buf[i] & 0xFF;
+ if (val1 < val2) {
+ areEqual = false;
+ break;
+ } else if (val1 > val2) {
+ errOffset = i;
+ break;
+ }
+ }
+ if (areEqual) {
+ if (prevBuf.length < buf.length) {
+ areEqual = false;
+ } else if (prevBuf.length > buf.length) {
+ areEqual = false;
+ errOffset = buf.length + 1;
+ }
+ }
+ if (errOffset != -1 || areEqual) {
+ StringBuffer msg = new StringBuffer();
+ if (errOffset != -1) {
+ msg.append("Left >= right at byte offset " + errOffset);
+ } else if (areEqual) {
+ msg.append("Bytes are equal");
+ } else {
+ throw new IllegalStateException();
+ }
+ msg.append("\nLeft hex bytes: ");
+ for (int i = 0; i < prevBuf.length; i += 1) {
+ msg.append(' ');
+ int val = prevBuf[i] & 0xFF;
+ if ((val & 0xF0) == 0) {
+ msg.append('0');
+ }
+ msg.append(Integer.toHexString(val));
+ }
+ msg.append("\nRight hex bytes:");
+ for (int i = 0; i < buf.length; i += 1) {
+ msg.append(' ');
+ int val = buf[i] & 0xFF;
+ if ((val & 0xF0) == 0) {
+ msg.append('0');
+ }
+ msg.append(Integer.toHexString(val));
+ }
+ if (dataIndex >= 0) {
+ msg.append("\nData index: " + dataIndex);
+ }
+ fail(msg.toString());
+ }
+ }
+ prevBuf = buf;
+ out.reset();
+ }
+
+ private void reset() {
+
+ prevBuf = null;
+ out.reset();
+ }
+
+ public void testString() {
+
+ final String[] DATA = {
+ "", "\u0001", "\u0002",
+ "A", "a", "ab", "b", "bb", "bba",
+ "c", "c\u0001", "d",
+ new String(new char[] { 0x7F }),
+ new String(new char[] { 0x7F, 0 }),
+ new String(new char[] { 0xFF }),
+ new String(new char[] { Character.MAX_VALUE }),
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeString(DATA[i]);
+ check(i);
+ }
+ reset();
+ out.writeString("a");
+ check();
+ out.writeString("a");
+ out.writeString("");
+ check();
+ out.writeString("a");
+ out.writeString("");
+ out.writeString("a");
+ check();
+ out.writeString("a");
+ out.writeString("b");
+ check();
+ out.writeString("aa");
+ check();
+ out.writeString("b");
+ check();
+ }
+
+ public void testFixedString() {
+
+ final char[][] DATA = {
+ {}, {'a'}, {'a', 'b'}, {'b'}, {'b', 'b'}, {0x7F}, {0xFF},
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeString(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testChars() {
+
+ final char[][] DATA = {
+ {}, {0}, {'a'}, {'a', 0}, {'a', 'b'}, {'b'}, {'b', 'b'},
+ {0x7F}, {0x7F, 0}, {0xFF}, {0xFF, 0},
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeChars(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testBytes() {
+
+ final char[][] DATA = {
+ {}, {0}, {'a'}, {'a', 0}, {'a', 'b'}, {'b'}, {'b', 'b'},
+ {0x7F}, {0xFF},
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeBytes(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testBoolean() {
+
+ final boolean[] DATA = {
+ false, true
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeBoolean(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testUnsignedByte() {
+
+ final int[] DATA = {
+ 0, 1, 0x7F, 0xFF
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeUnsignedByte(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testUnsignedShort() {
+
+ final int[] DATA = {
+ 0, 1, 0xFE, 0xFF, 0x800, 0x7FFF, 0xFFFF
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeUnsignedShort(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testUnsignedInt() {
+
+ final long[] DATA = {
+ 0, 1, 0xFE, 0xFF, 0x800, 0x7FFF, 0xFFFF, 0x80000,
+ 0x7FFFFFFF, 0x80000000, 0xFFFFFFFF
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeUnsignedInt(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testByte() {
+
+ final byte[] DATA = {
+ Byte.MIN_VALUE, Byte.MIN_VALUE + 1,
+ -1, 0, 1,
+ Byte.MAX_VALUE - 1, Byte.MAX_VALUE,
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeByte(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testShort() {
+
+ final short[] DATA = {
+ Short.MIN_VALUE, Short.MIN_VALUE + 1,
+ Byte.MIN_VALUE, Byte.MIN_VALUE + 1,
+ -1, 0, 1,
+ Byte.MAX_VALUE - 1, Byte.MAX_VALUE,
+ Short.MAX_VALUE - 1, Short.MAX_VALUE,
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeShort(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testInt() {
+
+ final int[] DATA = {
+ Integer.MIN_VALUE, Integer.MIN_VALUE + 1,
+ Short.MIN_VALUE, Short.MIN_VALUE + 1,
+ Byte.MIN_VALUE, Byte.MIN_VALUE + 1,
+ -1, 0, 1,
+ Byte.MAX_VALUE - 1, Byte.MAX_VALUE,
+ Short.MAX_VALUE - 1, Short.MAX_VALUE,
+ Integer.MAX_VALUE - 1, Integer.MAX_VALUE,
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeInt(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testLong() {
+
+ final long[] DATA = {
+ Long.MIN_VALUE, Long.MIN_VALUE + 1,
+ Integer.MIN_VALUE, Integer.MIN_VALUE + 1,
+ Short.MIN_VALUE, Short.MIN_VALUE + 1,
+ Byte.MIN_VALUE, Byte.MIN_VALUE + 1,
+ -1, 0, 1,
+ Byte.MAX_VALUE - 1, Byte.MAX_VALUE,
+ Short.MAX_VALUE - 1, Short.MAX_VALUE,
+ Integer.MAX_VALUE - 1, Integer.MAX_VALUE,
+ Long.MAX_VALUE - 1, Long.MAX_VALUE,
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeLong(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testFloat() {
+
+ // Only positive floats and doubles are ordered deterministically
+
+ final float[] DATA = {
+ 0, Float.MIN_VALUE, 2 * Float.MIN_VALUE,
+ (float) 0.01, (float) 0.02, (float) 0.99,
+ 1, (float) 1.01, (float) 1.02, (float) 1.99,
+ Byte.MAX_VALUE - 1, Byte.MAX_VALUE,
+ Short.MAX_VALUE - 1, Short.MAX_VALUE,
+ Integer.MAX_VALUE,
+ Long.MAX_VALUE / 2, Long.MAX_VALUE,
+ Float.MAX_VALUE,
+ Float.POSITIVE_INFINITY,
+ Float.NaN,
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeFloat(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testDouble() {
+
+ // Only positive floats and doubles are ordered deterministically
+
+ final double[] DATA = {
+ 0, Double.MIN_VALUE, 2 * Double.MIN_VALUE,
+ 0.001, 0.002, 0.999,
+ 1, 1.001, 1.002, 1.999,
+ Byte.MAX_VALUE - 1, Byte.MAX_VALUE,
+ Short.MAX_VALUE - 1, Short.MAX_VALUE,
+ Integer.MAX_VALUE - 1, Integer.MAX_VALUE,
+ Long.MAX_VALUE / 2, Long.MAX_VALUE,
+ Float.MAX_VALUE, Double.MAX_VALUE,
+ Double.POSITIVE_INFINITY,
+ Double.NaN,
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeDouble(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testSortedFloat() {
+
+ final float[] DATA = {
+ Float.NEGATIVE_INFINITY,
+ (- Float.MAX_VALUE),
+ Long.MIN_VALUE,
+ Long.MIN_VALUE / 2,
+ Integer.MIN_VALUE,
+ Short.MIN_VALUE,
+ Short.MIN_VALUE + 1,
+ Byte.MIN_VALUE,
+ Byte.MIN_VALUE + 1,
+ (float) -1.99,
+ (float) -1.02,
+ (float) -1.01,
+ -1,
+ (float) -0.99,
+ (float) -0.02,
+ (float) -0.01,
+ 2 * (- Float.MIN_VALUE),
+ (- Float.MIN_VALUE),
+ 0,
+ Float.MIN_VALUE,
+ 2 * Float.MIN_VALUE,
+ (float) 0.01,
+ (float) 0.02,
+ (float) 0.99,
+ 1,
+ (float) 1.01,
+ (float) 1.02,
+ (float) 1.99,
+ Byte.MAX_VALUE - 1,
+ Byte.MAX_VALUE,
+ Short.MAX_VALUE - 1,
+ Short.MAX_VALUE,
+ Integer.MAX_VALUE,
+ Long.MAX_VALUE / 2,
+ Long.MAX_VALUE,
+ Float.MAX_VALUE,
+ Float.POSITIVE_INFINITY,
+ Float.NaN,
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeSortedFloat(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testSortedDouble() {
+
+ final double[] DATA = {
+ Double.NEGATIVE_INFINITY,
+ (- Double.MAX_VALUE),
+ (- Float.MAX_VALUE),
+ Long.MIN_VALUE,
+ Long.MIN_VALUE / 2,
+ Integer.MIN_VALUE,
+ Short.MIN_VALUE,
+ Short.MIN_VALUE + 1,
+ Byte.MIN_VALUE,
+ Byte.MIN_VALUE + 1,
+ -1.999,
+ -1.002,
+ -1.001,
+ -1,
+ -0.999,
+ -0.002,
+ -0.001,
+ 2 * (- Double.MIN_VALUE),
+ (- Double.MIN_VALUE),
+ 0,
+ Double.MIN_VALUE,
+ 2 * Double.MIN_VALUE,
+ 0.001,
+ 0.002,
+ 0.999,
+ 1,
+ 1.001,
+ 1.002,
+ 1.999,
+ Byte.MAX_VALUE - 1,
+ Byte.MAX_VALUE,
+ Short.MAX_VALUE - 1,
+ Short.MAX_VALUE,
+ Integer.MAX_VALUE - 1,
+ Integer.MAX_VALUE,
+ Long.MAX_VALUE / 2,
+ Long.MAX_VALUE,
+ Float.MAX_VALUE,
+ Double.MAX_VALUE,
+ Double.POSITIVE_INFINITY,
+ Double.NaN,
+ };
+ for (int i = 0; i < DATA.length; i += 1) {
+ out.writeSortedDouble(DATA[i]);
+ check(i);
+ }
+ }
+
+ public void testPackedIntAndLong() {
+ /* Only packed int/long values from 0 to 630 are ordered correctly */
+ for (int i = 0; i <= 630; i += 1) {
+ out.writePackedInt(i);
+ check(i);
+ }
+ reset();
+ for (int i = 0; i <= 630; i += 1) {
+ out.writePackedLong(i);
+ check(i);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/KeyRangeTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/KeyRangeTest.java
new file mode 100644
index 0000000..aeba1bc
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/KeyRangeTest.java
@@ -0,0 +1,440 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections;
+
+import java.io.File;
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Comparator;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.ByteArrayBinding;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+import com.sleepycat.db.OperationStatus;
+import com.sleepycat.util.keyrange.KeyRange;
+import com.sleepycat.util.keyrange.KeyRangeException;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * @author Mark Hayes
+ */
+public class KeyRangeTest extends TestCase {
+
+ private static boolean VERBOSE = false;
+
+ private static final byte FF = (byte) 0xFF;
+
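+    /*
+     * KEYS are listed in their expected unsigned-byte sort order;
+     * EXTREME_KEY_BYTES sort below and above every entry in KEYS and are
+     * used as out-of-range scan bounds in doScan.
+     */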
+ private static final byte[][] KEYS = {
+ /* 0 */ {1},
+ /* 1 */ {FF},
+ /* 2 */ {FF, 0},
+ /* 3 */ {FF, 0x7F},
+ /* 4 */ {FF, FF},
+ /* 5 */ {FF, FF, 0},
+ /* 6 */ {FF, FF, 0x7F},
+ /* 7 */ {FF, FF, FF},
+ };
+ private static byte[][] EXTREME_KEY_BYTES = {
+ /* 0 */ {0},
+ /* 1 */ {FF, FF, FF, FF},
+ };
+
+ private Environment env;
+ private Database store;
+ private DataView view;
+ private DataCursor cursor;
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+
+ return new TestSuite(KeyRangeTest.class);
+ }
+
+ public KeyRangeTest(String name) {
+
+ super(name);
+ }
+
+ @Override
+ public void setUp() {
+ SharedTestUtils.printTestName(SharedTestUtils.qualifiedTestName(this));
+ }
+
+ private void openDb(Comparator<byte []> comparator)
+ throws Exception {
+
+ File dir = SharedTestUtils.getNewDir();
+ ByteArrayBinding dataBinding = new ByteArrayBinding();
+ EnvironmentConfig envConfig = new EnvironmentConfig();
+ envConfig.setAllowCreate(true);
+ DbCompat.setInitializeCache(envConfig, true);
+ env = new Environment(dir, envConfig);
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ DbCompat.setTypeBtree(dbConfig);
+ dbConfig.setAllowCreate(true);
+ if (comparator != null) {
+ DbCompat.setBtreeComparator(dbConfig, comparator);
+ }
+ store = DbCompat.testOpenDatabase
+ (env, null, "test.db", null, dbConfig);
+ view = new DataView(store, dataBinding, dataBinding, null, true, null);
+ }
+
+ private void closeDb()
+ throws Exception {
+
+ store.close();
+ store = null;
+ env.close();
+ env = null;
+ }
+
+ @Override
+ public void tearDown() {
+ try {
+ if (store != null) {
+ store.close();
+ }
+ } catch (Exception e) {
+ System.out.println("Exception ignored during close: " + e);
+ }
+ try {
+ if (env != null) {
+ env.close();
+ }
+ } catch (Exception e) {
+ System.out.println("Exception ignored during close: " + e);
+ }
+        /* Ensure that GC can clean up. */
+ env = null;
+ store = null;
+ view = null;
+ cursor = null;
+ }
+
+ public void testScan() throws Exception {
+ openDb(null);
+ doScan(false);
+ closeDb();
+ }
+
+ public void testScanComparator() throws Exception {
+ openDb(new ReverseComparator());
+ doScan(true);
+ closeDb();
+ }
+
+ private void doScan(boolean reversed) throws Exception {
+
+ byte[][] keys = new byte[KEYS.length][];
+ final int end = KEYS.length - 1;
+ cursor = new DataCursor(view, true);
+ for (int i = 0; i <= end; i++) {
+ keys[i] = KEYS[i];
+ cursor.put(keys[i], KEYS[i], null, false);
+ }
+ cursor.close();
+ byte[][] extremeKeys = new byte[EXTREME_KEY_BYTES.length][];
+ for (int i = 0; i < extremeKeys.length; i++) {
+ extremeKeys[i] = EXTREME_KEY_BYTES[i];
+ }
+
+ // with empty range
+
+ cursor = new DataCursor(view, false);
+ expectRange(KEYS, 0, end, reversed);
+ cursor.close();
+
+ // begin key only, inclusive
+
+ for (int i = 0; i <= end; i++) {
+ cursor = newCursor(view, keys[i], true, null, false, reversed);
+ expectRange(KEYS, i, end, reversed);
+ cursor.close();
+ }
+
+ // begin key only, exclusive
+
+ for (int i = 0; i <= end; i++) {
+ cursor = newCursor(view, keys[i], false, null, false, reversed);
+ expectRange(KEYS, i + 1, end, reversed);
+ cursor.close();
+ }
+
+ // end key only, inclusive
+
+ for (int i = 0; i <= end; i++) {
+ cursor = newCursor(view, null, false, keys[i], true, reversed);
+ expectRange(KEYS, 0, i, reversed);
+ cursor.close();
+ }
+
+ // end key only, exclusive
+
+ for (int i = 0; i <= end; i++) {
+ cursor = newCursor(view, null, false, keys[i], false, reversed);
+ expectRange(KEYS, 0, i - 1, reversed);
+ cursor.close();
+ }
+
+ // begin and end keys, inclusive and exclusive
+
+ for (int i = 0; i <= end; i++) {
+ for (int j = i; j <= end; j++) {
+ // begin inclusive, end inclusive
+
+ cursor = newCursor(view, keys[i], true, keys[j],
+ true, reversed);
+ expectRange(KEYS, i, j, reversed);
+ cursor.close();
+
+ // begin inclusive, end exclusive
+
+ cursor = newCursor(view, keys[i], true, keys[j],
+ false, reversed);
+ expectRange(KEYS, i, j - 1, reversed);
+ cursor.close();
+
+ // begin exclusive, end inclusive
+
+ cursor = newCursor(view, keys[i], false, keys[j],
+ true, reversed);
+ expectRange(KEYS, i + 1, j, reversed);
+ cursor.close();
+
+ // begin exclusive, end exclusive
+
+ cursor = newCursor(view, keys[i], false, keys[j],
+ false, reversed);
+ expectRange(KEYS, i + 1, j - 1, reversed);
+ cursor.close();
+ }
+ }
+
+ // single key range
+
+ for (int i = 0; i <= end; i++) {
+ cursor = new DataCursor(view, false, keys[i]);
+ expectRange(KEYS, i, i, reversed);
+ cursor.close();
+ }
+
+ // start with lower extreme (before any existing key)
+
+ cursor = newCursor(view, extremeKeys[0], true, null, false, reversed);
+ expectRange(KEYS, 0, end, reversed);
+ cursor.close();
+
+ // start with higher extreme (after any existing key)
+
+ cursor = newCursor(view, null, false, extremeKeys[1], true, reversed);
+ expectRange(KEYS, 0, end, reversed);
+ cursor.close();
+ }
+
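+    /*
+     * With the reverse comparator the begin and end keys are swapped, so
+     * the same logical range is expressed in the reversed storage order.
+     */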
+ private DataCursor newCursor(DataView view,
+ Object beginKey, boolean beginInclusive,
+ Object endKey, boolean endInclusive,
+ boolean reversed)
+ throws Exception {
+
+ if (reversed) {
+ return new DataCursor(view, false,
+ endKey, endInclusive,
+ beginKey, beginInclusive);
+ } else {
+ return new DataCursor(view, false,
+ beginKey, beginInclusive,
+ endKey, endInclusive);
+ }
+ }
+
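+    /*
+     * Walks the cursor over the expected range in both directions: a
+     * forward pass using getFirst/getNext and a backward pass using
+     * getLast/getPrev, verifying that exactly the entries bytes[first..last]
+     * are returned.
+     */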
+ private void expectRange(byte[][] bytes, int first, int last,
+ boolean reversed)
+ throws DatabaseException {
+
+ int i;
+ boolean init;
+ for (init = true, i = first;; i++, init = false) {
+ if (checkRange(bytes, first, last, i <= last,
+ reversed, !reversed, init, i)) {
+ break;
+ }
+ }
+ for (init = true, i = last;; i--, init = false) {
+ if (checkRange(bytes, first, last, i >= first,
+ reversed, reversed, init, i)) {
+ break;
+ }
+ }
+ }
+
+ private boolean checkRange(byte[][] bytes, int first, int last,
+ boolean inRange, boolean reversed,
+ boolean forward, boolean init,
+ int i)
+ throws DatabaseException {
+
+ OperationStatus s;
+ if (forward) {
+ if (init) {
+ s = cursor.getFirst(false);
+ } else {
+ s = cursor.getNext(false);
+ }
+ } else {
+ if (init) {
+ s = cursor.getLast(false);
+ } else {
+ s = cursor.getPrev(false);
+ }
+ }
+
+ String msg = " " + (forward ? "next" : "prev") + " i=" + i +
+ " first=" + first + " last=" + last +
+ (reversed ? " reversed" : " not reversed");
+
+ // check that moving past ends doesn't move the cursor
+ if (s == OperationStatus.SUCCESS && i == first) {
+ OperationStatus s2 = reversed ? cursor.getNext(false)
+ : cursor.getPrev(false);
+ assertEquals(msg, OperationStatus.NOTFOUND, s2);
+ }
+ if (s == OperationStatus.SUCCESS && i == last) {
+ OperationStatus s2 = reversed ? cursor.getPrev(false)
+ : cursor.getNext(false);
+ assertEquals(msg, OperationStatus.NOTFOUND, s2);
+ }
+
+ byte[] val = (s == OperationStatus.SUCCESS)
+ ? ((byte[]) cursor.getCurrentValue())
+ : null;
+
+ if (inRange) {
+ assertNotNull("RangeNotFound" + msg, val);
+
+ if (!Arrays.equals(val, bytes[i])){
+ printBytes(val);
+ printBytes(bytes[i]);
+ fail("RangeKeyNotEqual" + msg);
+ }
+ if (VERBOSE) {
+ System.out.println("GotRange" + msg);
+ }
+ return false;
+ } else {
+ assertEquals("RangeExceeded" + msg, OperationStatus.NOTFOUND, s);
+ return true;
+ }
+ }
+
+ private void printBytes(byte[] bytes) {
+
+ for (int i = 0; i < bytes.length; i += 1) {
+ System.out.print(Integer.toHexString(bytes[i] & 0xFF));
+ System.out.print(' ');
+ }
+ System.out.println();
+ }
+
+ public void testSubRanges() {
+
+ DatabaseEntry begin = new DatabaseEntry();
+ DatabaseEntry begin2 = new DatabaseEntry();
+ DatabaseEntry end = new DatabaseEntry();
+ DatabaseEntry end2 = new DatabaseEntry();
+ KeyRange range = new KeyRange(null);
+ KeyRange range2 = null;
+
+ /* Base range [1, 2] */
+ begin.setData(new byte[] { 1 });
+ end.setData(new byte[] { 2 });
+ range = range.subRange(begin, true, end, true);
+
+        /* Subrange (0, 1] is invalid. */
+ begin2.setData(new byte[] { 0 });
+ end2.setData(new byte[] { 1 });
+ try {
+ range2 = range.subRange(begin2, false, end2, true);
+ fail();
+ } catch (KeyRangeException expected) {}
+
+ /* Subrange [1, 3) is invalid. */
+ begin2.setData(new byte[] { 1 });
+ end2.setData(new byte[] { 3 });
+ try {
+ range2 = range.subRange(begin2, true, end2, false);
+ fail();
+ } catch (KeyRangeException expected) {}
+
+ /* Subrange [2, 2] is valid. */
+ begin2.setData(new byte[] { 2 });
+ end2.setData(new byte[] { 2 });
+ range2 = range.subRange(begin2, true, end2, true);
+
+ /* Subrange [0, 1] is invalid. */
+ begin2.setData(new byte[] { 0 });
+ end2.setData(new byte[] { 1 });
+ try {
+ range2 = range.subRange(begin2, true, end2, true);
+ fail();
+ } catch (KeyRangeException expected) {}
+
+ /* Subrange (0, 3] is invalid. */
+ begin2.setData(new byte[] { 0 });
+ end2.setData(new byte[] { 3 });
+ try {
+ range2 = range.subRange(begin2, false, end2, true);
+ fail();
+ } catch (KeyRangeException expected) {}
+
+ /* Subrange [3, 3) is invalid. */
+ begin2.setData(new byte[] { 3 });
+ end2.setData(new byte[] { 3 });
+ try {
+ range2 = range.subRange(begin2, true, end2, false);
+ fail();
+ } catch (KeyRangeException expected) {}
+ }
+
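+    /*
+     * Byte-wise comparator that inverts the default ordering; used by
+     * testScanComparator to exercise the reversed-range code paths.
+     */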
+ @SuppressWarnings("serial")
+ public static class ReverseComparator implements Comparator<byte[]>,
+ Serializable {
+ public int compare(byte[] d1, byte[] d2) {
+ int cmp = KeyRange.compareBytes(d1, 0, d1.length,
+ d2, 0, d2.length);
+ if (cmp < 0) {
+ return 1;
+ } else if (cmp > 0) {
+ return -1;
+ } else {
+ return 0;
+ }
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/CollectionTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/CollectionTest.java
new file mode 100644
index 0000000..e690c01
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/CollectionTest.java
@@ -0,0 +1,3048 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.concurrent.ConcurrentMap;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.collections.MapEntryParameter;
+import com.sleepycat.collections.StoredCollection;
+import com.sleepycat.collections.StoredCollections;
+import com.sleepycat.collections.StoredContainer;
+import com.sleepycat.collections.StoredEntrySet;
+import com.sleepycat.collections.StoredIterator;
+import com.sleepycat.collections.StoredKeySet;
+import com.sleepycat.collections.StoredList;
+import com.sleepycat.collections.StoredMap;
+import com.sleepycat.collections.StoredSortedEntrySet;
+import com.sleepycat.collections.StoredSortedKeySet;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.StoredSortedValueSet;
+import com.sleepycat.collections.StoredValueSet;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.util.ExceptionUnwrapper;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class CollectionTest extends TestCase {
+
+ private static final int NONE = 0;
+ private static final int SUB = 1;
+ private static final int HEAD = 2;
+ private static final int TAIL = 3;
+
+ /*
+ * For long tests we permute testStoredIterator to test both StoredIterator
+ * and BlockIterator. When testing BlockIterator, we permute the maxKey
+ * over the array values below. BlockIterator's block size is 10. So we
+ * test below the block size (6), at the block size (10), and above it (14
+ * and 22).
+ */
+ private static final int DEFAULT_MAX_KEY = 6;
+ private static final int[] MAX_KEYS = {6, 10, 14, 22};
+
+ private boolean testStoredIterator;
+ private int maxKey; /* Must be a multiple of 2. */
+ private int beginKey = 1;
+ private int endKey;
+
+ private Environment env;
+ private Database store;
+ private Database index;
+ private final boolean isEntityBinding;
+ private final boolean isAutoCommit;
+ private TestStore testStore;
+ private String testName;
+ private final EntryBinding keyBinding;
+ private final EntryBinding valueBinding;
+ private final EntityBinding entityBinding;
+ private TransactionRunner readRunner;
+ private TransactionRunner writeRunner;
+ private TransactionRunner writeIterRunner;
+ private TestEnv testEnv;
+
+ private StoredMap map;
+ private StoredMap imap; // insertable map (primary store for indexed map)
+ private StoredSortedMap smap; // sorted map (null or equal to map)
+ private StoredMap saveMap;
+ private StoredSortedMap saveSMap;
+ private int rangeType;
+ private StoredList list;
+    private StoredList ilist; // insertable list (primary store for indexed list)
+ private StoredList saveList;
+ private StoredKeySet keySet;
+ private StoredValueSet valueSet;
+
+ /**
+ * Runs a command line collection test.
+ * @see #usage
+ */
+ public static void main(String[] args) {
+ if (args.length == 1 &&
+ (args[0].equals("-h") || args[0].equals("-help"))) {
+ usage();
+ } else {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite(args));
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+ }
+
+ private static void usage() {
+
+ System.out.println(
+ "Usage: java com.sleepycat.collections.test.CollectionTest\n" +
+ " -h | -help\n" +
+ " [testName]...\n" +
+ " where testName has the format:\n" +
+ " <env>-<store>-{entity|value}\n" +
+ " <env> is:\n" +
+ " bdb | cdb | txn\n" +
+ " <store> is:\n" +
+ " btree-uniq | btree-dup | btree-dupsort | btree-recnum |\n" +
+ " hash-uniq | hash-dup | hash-dupsort |\n" +
+ " queue | recno | recno-renum\n" +
+ " For example: bdb-btree-uniq-entity\n" +
+ " If no arguments are given then all tests are run.");
+ System.exit(2);
+ }
+
+ public static Test suite() {
+ return suite(null);
+ }
+
+ static Test suite(String[] args) {
+ if (SharedTestUtils.runLongTests()) {
+ TestSuite suite = new TestSuite();
+
+ /* StoredIterator tests. */
+ permuteTests(args, suite, true, DEFAULT_MAX_KEY);
+
+ /* BlockIterator tests with different maxKey values. */
+ for (int i = 0; i < MAX_KEYS.length; i += 1) {
+ permuteTests(args, suite, false, MAX_KEYS[i]);
+ }
+
+ return suite;
+ } else {
+ return baseSuite(args);
+ }
+ }
+
+ private static void permuteTests(String[] args,
+ TestSuite suite,
+ boolean storedIter,
+ int maxKey) {
+ TestSuite baseTests = baseSuite(args);
+ Enumeration e = baseTests.tests();
+ while (e.hasMoreElements()) {
+ CollectionTest t = (CollectionTest) e.nextElement();
+ t.setParams(storedIter, maxKey);
+ suite.addTest(t);
+ }
+ }
+
+ private static TestSuite baseSuite(String[] args) {
+ TestSuite suite = new TestSuite();
+ for (int i = 0; i < TestEnv.ALL.length; i += 1) {
+ for (int j = 0; j < TestStore.ALL.length; j += 1) {
+ for (int k = 0; k < 2; k += 1) {
+ boolean entityBinding = (k != 0);
+
+ addTest(args, suite, new CollectionTest(
+ TestEnv.ALL[i], TestStore.ALL[j],
+ entityBinding, false));
+
+ if (TestEnv.ALL[i].isTxnMode()) {
+ addTest(args, suite, new CollectionTest(
+ TestEnv.ALL[i], TestStore.ALL[j],
+ entityBinding, true));
+ }
+ }
+ }
+ }
+ return suite;
+ }
+
+ private static void addTest(String[] args, TestSuite suite,
+ CollectionTest test) {
+
+ if (args == null || args.length == 0) {
+ suite.addTest(test);
+ } else {
+ for (int t = 0; t < args.length; t += 1) {
+ if (args[t].equals(test.testName)) {
+ suite.addTest(test);
+ break;
+ }
+ }
+ }
+ }
+
+ public CollectionTest(TestEnv testEnv, TestStore testStore,
+ boolean isEntityBinding, boolean isAutoCommit) {
+
+ super(null);
+
+ this.testEnv = testEnv;
+ this.testStore = testStore;
+ this.isEntityBinding = isEntityBinding;
+ this.isAutoCommit = isAutoCommit;
+
+ keyBinding = testStore.getKeyBinding();
+ valueBinding = testStore.getValueBinding();
+ entityBinding = testStore.getEntityBinding();
+
+ setParams(false, DEFAULT_MAX_KEY);
+ }
+
+ private void setParams(boolean storedIter, int maxKey) {
+
+ this.testStoredIterator = storedIter;
+ this.maxKey = maxKey;
+ this.endKey = maxKey;
+
+ testName = testEnv.getName() + '-' + testStore.getName() +
+ (isEntityBinding ? "-entity" : "-value") +
+ (isAutoCommit ? "-autoCommit" : "") +
+ (testStoredIterator ? "-storedIter" : "") +
+ ((maxKey != DEFAULT_MAX_KEY) ? ("-maxKey-" + maxKey) : "");
+ }
+
+ @Override
+ public void tearDown() {
+ setName(testName);
+ }
+
+ @Override
+ public void runTest()
+ throws Exception {
+
+ SharedTestUtils.printTestName(SharedTestUtils.qualifiedTestName(this));
+ try {
+ env = testEnv.open(testName);
+
+            // Reads always use a do-nothing (non-transactional) runner.  When
+            // auto-commit is being tested, writes via collections also use the
+            // do-nothing runner so that the per-collection auto-commit property
+            // (set elsewhere) takes effect, and writes via an iterator use the
+            // normal transactional runner only when StoredIterator is tested;
+            // otherwise all writes use the normal transactional runner.
+ //
+ TransactionRunner normalRunner = newTransactionRunner(env);
+ normalRunner.setAllowNestedTransactions(
+ DbCompat.NESTED_TRANSACTIONS);
+ TransactionRunner nullRunner = new NullTransactionRunner(env);
+ readRunner = nullRunner;
+ if (isAutoCommit) {
+ writeRunner = nullRunner;
+ writeIterRunner = testStoredIterator ? normalRunner
+ : nullRunner;
+ } else {
+ writeRunner = normalRunner;
+ writeIterRunner = normalRunner;
+ }
+
+ store = testStore.open(env, "unindexed.db");
+ testUnindexed();
+ store.close();
+ store = null;
+
+ TestStore indexOf = testStore.getIndexOf();
+ if (indexOf != null) {
+ store = indexOf.open(env, "indexed.db");
+ index = testStore.openIndex(store, "index.db");
+ testIndexed();
+ index.close();
+ index = null;
+ store.close();
+ store = null;
+ }
+ env.close();
+ env = null;
+ } catch (Exception e) {
+ throw ExceptionUnwrapper.unwrap(e);
+ } finally {
+ if (index != null) {
+ try {
+ index.close();
+ } catch (Exception e) {
+ }
+ }
+ if (store != null) {
+ try {
+ store.close();
+ } catch (Exception e) {
+ }
+ }
+ if (env != null) {
+ try {
+ env.close();
+ } catch (Exception e) {
+ }
+ }
+            /* Ensure that GC can clean up. */
+ index = null;
+ store = null;
+ env = null;
+ readRunner = null;
+ writeRunner = null;
+ writeIterRunner = null;
+ map = null;
+ imap = null;
+ smap = null;
+ saveMap = null;
+ saveSMap = null;
+ list = null;
+ ilist = null;
+ saveList = null;
+ keySet = null;
+ valueSet = null;
+ testEnv = null;
+ testStore = null;
+ }
+ }
+
+ /**
+ * Is overridden in XACollectionTest.
+ * @throws DatabaseException from subclasses.
+ */
+ protected TransactionRunner newTransactionRunner(Environment env)
+ throws DatabaseException {
+
+ return new TransactionRunner(env);
+ }
+
+ void testCreation(StoredContainer cont, int expectSize) {
+ assertEquals(index != null, cont.isSecondary());
+ assertEquals(testStore.isOrdered(), cont.isOrdered());
+ assertEquals(testStore.areKeyRangesAllowed(),
+ cont.areKeyRangesAllowed());
+ assertEquals(testStore.areKeysRenumbered(), cont.areKeysRenumbered());
+ assertEquals(testStore.areDuplicatesAllowed(),
+ cont.areDuplicatesAllowed());
+ assertEquals(testEnv.isTxnMode(), cont.isTransactional());
+ assertEquals(expectSize, cont.size());
+ }
+
+ void testMapCreation(ConcurrentMap map) {
+ assertTrue(map.values() instanceof Set);
+ assertEquals(testStore.areKeyRangesAllowed(),
+ map.keySet() instanceof SortedSet);
+ assertEquals(testStore.areKeyRangesAllowed(),
+ map.entrySet() instanceof SortedSet);
+ assertEquals(testStore.areKeyRangesAllowed() && isEntityBinding,
+ map.values() instanceof SortedSet);
+ }
+
+ void testUnindexed()
+ throws Exception {
+
+ // create primary map
+ if (testStore.areKeyRangesAllowed()) {
+ if (isEntityBinding) {
+ smap = new StoredSortedMap(store, keyBinding,
+ entityBinding,
+ testStore.getKeyAssigner());
+ valueSet = new StoredSortedValueSet(store, entityBinding,
+ true);
+ } else {
+ smap = new StoredSortedMap(store, keyBinding,
+ valueBinding,
+ testStore.getKeyAssigner());
+ // sorted value set is not possible since key cannot be derived
+ // for performing subSet, etc.
+ }
+ keySet = new StoredSortedKeySet(store, keyBinding, true);
+ map = smap;
+ } else {
+ if (isEntityBinding) {
+ map = new StoredMap(store, keyBinding, entityBinding,
+ testStore.getKeyAssigner());
+ valueSet = new StoredValueSet(store, entityBinding, true);
+ } else {
+ map = new StoredMap(store, keyBinding, valueBinding,
+ testStore.getKeyAssigner());
+ valueSet = new StoredValueSet(store, valueBinding, true);
+ }
+ smap = null;
+ keySet = new StoredKeySet(store, keyBinding, true);
+ }
+ imap = map;
+
+ // create primary list
+ if (testStore.hasRecNumAccess()) {
+ if (isEntityBinding) {
+ ilist = new StoredList(store, entityBinding,
+ testStore.getKeyAssigner());
+ } else {
+ ilist = new StoredList(store, valueBinding,
+ testStore.getKeyAssigner());
+ }
+ list = ilist;
+ } else {
+ try {
+ if (isEntityBinding) {
+ ilist = new StoredList(store, entityBinding,
+ testStore.getKeyAssigner());
+ } else {
+ ilist = new StoredList(store, valueBinding,
+ testStore.getKeyAssigner());
+ }
+ fail();
+ } catch (IllegalArgumentException expected) {}
+ }
+
+ testCreation(map, 0);
+ if (list != null) {
+ testCreation(list, 0);
+ }
+ testMapCreation(map);
+ addAll();
+ testAll();
+ }
+
+ void testIndexed()
+ throws Exception {
+
+ // create primary map
+ if (isEntityBinding) {
+ map = new StoredMap(store, keyBinding, entityBinding,
+ testStore.getKeyAssigner());
+ } else {
+ map = new StoredMap(store, keyBinding, valueBinding,
+ testStore.getKeyAssigner());
+ }
+ imap = map;
+ smap = null;
+ // create primary list
+ if (testStore.hasRecNumAccess()) {
+ if (isEntityBinding) {
+ list = new StoredList(store, entityBinding,
+ testStore.getKeyAssigner());
+ } else {
+ list = new StoredList(store, valueBinding,
+ testStore.getKeyAssigner());
+ }
+ ilist = list;
+ }
+
+ addAll();
+ readAll();
+
+ // create indexed map (keySet/valueSet)
+ if (testStore.areKeyRangesAllowed()) {
+ if (isEntityBinding) {
+ map = smap = new StoredSortedMap(index, keyBinding,
+ entityBinding, true);
+ valueSet = new StoredSortedValueSet(index, entityBinding,
+ true);
+ } else {
+ map = smap = new StoredSortedMap(index, keyBinding,
+ valueBinding, true);
+ // sorted value set is not possible since key cannot be derived
+ // for performing subSet, etc.
+ }
+ keySet = new StoredSortedKeySet(index, keyBinding, true);
+ } else {
+ if (isEntityBinding) {
+ map = new StoredMap(index, keyBinding, entityBinding, true);
+ valueSet = new StoredValueSet(index, entityBinding, true);
+ } else {
+ map = new StoredMap(index, keyBinding, valueBinding, true);
+ valueSet = new StoredValueSet(index, valueBinding, true);
+ }
+ smap = null;
+ keySet = new StoredKeySet(index, keyBinding, true);
+ }
+
+ // create indexed list
+ if (testStore.hasRecNumAccess()) {
+ if (isEntityBinding) {
+ list = new StoredList(index, entityBinding, true);
+ } else {
+ list = new StoredList(index, valueBinding, true);
+ }
+ } else {
+ try {
+ if (isEntityBinding) {
+ list = new StoredList(index, entityBinding, true);
+ } else {
+ list = new StoredList(index, valueBinding, true);
+ }
+ fail();
+ } catch (IllegalArgumentException expected) {}
+ }
+
+ testCreation(map, maxKey);
+ testCreation((StoredContainer) map.values(), maxKey);
+ testCreation((StoredContainer) map.keySet(), maxKey);
+ testCreation((StoredContainer) map.entrySet(), maxKey);
+ if (list != null) {
+ testCreation(list, maxKey);
+ }
+ testMapCreation(map);
+ testAll();
+ }
+
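+    /*
+     * Runs the full battery of read, update, remove and re-add operations
+     * against whichever map, list and set views were created by
+     * testUnindexed or testIndexed.
+     */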
+ void testAll()
+ throws Exception {
+
+ checkKeySetAndValueSet();
+ readAll();
+ updateAll();
+ readAll();
+ if (!map.areKeysRenumbered()) {
+ removeOdd();
+ readEven();
+ addOdd();
+ readAll();
+ removeOddIter();
+ readEven();
+ if (imap.areDuplicatesAllowed()) {
+ addOddDup();
+ } else {
+ addOdd();
+ }
+ readAll();
+ removeOddEntry();
+ readEven();
+ addOdd();
+ readAll();
+ if (isEntityBinding) {
+ removeOddEntity();
+ readEven();
+ addOddEntity();
+ readAll();
+ }
+ bulkOperations();
+ }
+ if (isListAddAllowed()) {
+ removeOddList();
+ readEvenList();
+ addOddList();
+ readAll();
+ if (!isEntityBinding) {
+ removeOddListValue();
+ readEvenList();
+ addOddList();
+ readAll();
+ }
+ }
+ if (list != null) {
+ bulkListOperations();
+ } else {
+ listOperationsNotAllowed();
+ }
+ if (smap != null) {
+ readWriteRange(SUB, 1, 1);
+ readWriteRange(HEAD, 1, 1);
+ readWriteRange(SUB, 1, maxKey);
+ readWriteRange(HEAD, 1, maxKey);
+ readWriteRange(TAIL, 1, maxKey);
+ readWriteRange(SUB, 1, 3);
+ readWriteRange(HEAD, 1, 3);
+ readWriteRange(SUB, 2, 2);
+ readWriteRange(SUB, 2, maxKey);
+ readWriteRange(TAIL, 2, maxKey);
+ readWriteRange(SUB, maxKey, maxKey);
+ readWriteRange(TAIL, maxKey, maxKey);
+ readWriteRange(SUB, maxKey + 1, maxKey + 1);
+ readWriteRange(TAIL, maxKey + 1, maxKey + 1);
+ readWriteRange(SUB, 0, 0);
+ readWriteRange(HEAD, 0, 0);
+ }
+ updateAll();
+ readAll();
+ if (map.areDuplicatesAllowed()) {
+ readWriteDuplicates();
+ readAll();
+ } else {
+ duplicatesNotAllowed();
+ readAll();
+ }
+ if (testEnv.isCdbMode()) {
+ testCdbLocking();
+ }
+ removeAll();
+ if (!map.areKeysRenumbered()) {
+ testConcurrentMap();
+ }
+ if (isListAddAllowed()) {
+ testIterAddList();
+ clearAll();
+ }
+ if (imap.areDuplicatesAllowed()) {
+ testIterAddDuplicates();
+ clearAll();
+ }
+ if (isListAddAllowed()) {
+ addAllList();
+ readAll();
+ removeAllList();
+ }
+ appendAll();
+ }
+
+ void checkKeySetAndValueSet() {
+
+ // use bulk operations to check that the explicitly constructed
+ // keySet/valueSet are equivalent to the map's own views
+ assertEquals(keySet, imap.keySet());
+ if (valueSet != null) {
+ assertEquals(valueSet, imap.values());
+ }
+ }
+
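+ /**
+ * Returns either a StoredIterator or a standard iterator for the given
+ * collection, depending on the testStoredIterator setting, so both code
+ * paths are exercised.
+ */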
+ Iterator iterator(Collection storedCollection) {
+
+ if (testStoredIterator) {
+ return ((StoredCollection) storedCollection).storedIterator();
+ } else {
+ return storedCollection.iterator();
+ }
+ }
+
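+ /**
+ * Verifies that the map starts out empty, then inserts one value per key
+ * in the configured key range using Map.put(), checking each insertion.
+ */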
+ void addAll()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ assertTrue(imap.isEmpty());
+ Iterator iter = iterator(imap.entrySet());
+ try {
+ assertTrue(!iter.hasNext());
+ } finally {
+ StoredIterator.close(iter);
+ }
+ assertEquals(0, imap.keySet().toArray().length);
+ assertEquals(0, imap.keySet().toArray(new Object[0]).length);
+ assertEquals(0, imap.entrySet().toArray().length);
+ assertEquals(0, imap.entrySet().toArray(new Object[0]).length);
+ assertEquals(0, imap.values().toArray().length);
+ assertEquals(0, imap.values().toArray(new Object[0]).length);
+
+ for (int i = beginKey; i <= endKey; i += 1) {
+ Long key = makeKey(i);
+ Object val = makeVal(i);
+ assertNull(imap.get(key));
+ assertTrue(!imap.keySet().contains(key));
+ assertTrue(!imap.values().contains(val));
+ assertNull(imap.put(key, val));
+ assertEquals(val, imap.get(key));
+ assertTrue(imap.keySet().contains(key));
+ assertTrue(imap.values().contains(val));
+ assertTrue(imap.duplicates(key).contains(val));
+ if (!imap.areDuplicatesAllowed()) {
+ assertEquals(val, imap.put(key, val));
+ }
+ checkDupsSize(1, imap.duplicates(key));
+ }
+ assertTrue(!imap.isEmpty());
+ }
+ });
+ }
+
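+ /**
+ * Inserts values with append(), alternating between the list and the
+ * map, and verifies the keys that are assigned automatically.
+ */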
+ void appendAll()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ assertTrue(imap.isEmpty());
+
+ TestKeyAssigner keyAssigner = testStore.getKeyAssigner();
+ if (keyAssigner != null) {
+ keyAssigner.reset();
+ }
+
+ for (int i = beginKey; i <= endKey; i += 1) {
+ boolean useList = (i & 1) == 0;
+ Long key = makeKey(i);
+ Object val = makeVal(i);
+ assertNull(imap.get(key));
+ if (keyAssigner != null) {
+ if (useList && ilist != null) {
+ assertEquals(i - 1, ilist.append(val));
+ } else {
+ assertEquals(key, imap.append(val));
+ }
+ assertEquals(val, imap.get(key));
+ } else {
+ Long recnoKey;
+ if (useList && ilist != null) {
+ recnoKey = new Long(ilist.append(val) + 1);
+ } else {
+ recnoKey = (Long) imap.append(val);
+ }
+ assertNotNull(recnoKey);
+ Object recnoVal;
+ if (isEntityBinding) {
+ recnoVal = makeEntity(recnoKey.intValue(), i);
+ } else {
+ recnoVal = val;
+ }
+ assertEquals(recnoVal, imap.get(recnoKey));
+ }
+ }
+ }
+ });
+ }
+
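+ /**
+ * Rewrites every existing key/value pair through the map, the list and
+ * the collection iterators, checking that updates via a key iterator are
+ * rejected.
+ */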
+ void updateAll()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() throws Exception {
+ for (int i = beginKey; i <= endKey; i += 1) {
+ Long key = makeKey(i);
+ Object val = makeVal(i);
+ if (!imap.areDuplicatesAllowed()) {
+ assertEquals(val, imap.put(key, val));
+ }
+ if (isEntityBinding) {
+ assertTrue(!imap.values().add(val));
+ }
+ checkDupsSize(1, imap.duplicates(key));
+ if (ilist != null) {
+ int idx = i - 1;
+ assertEquals(val, ilist.set(idx, val));
+ }
+ }
+ updateIter(map.entrySet());
+ updateIter(map.values());
+ if (beginKey <= endKey) {
+ ListIterator iter = (ListIterator) iterator(map.keySet());
+ try {
+ assertNotNull(iter.next());
+ iter.set(makeKey(beginKey));
+ fail();
+ } catch (UnsupportedOperationException e) {
+ } finally {
+ StoredIterator.close(iter);
+ }
+ }
+ if (list != null) {
+ updateIter(list);
+ }
+ }
+ });
+ }
+
+ void updateIter(final Collection coll)
+ throws Exception {
+
+ writeIterRunner.run(new TransactionWorker() {
+ public void doWork() {
+ ListIterator iter = (ListIterator) iterator(coll);
+ try {
+ for (int i = beginKey; i <= endKey; i += 1) {
+ assertTrue(iter.hasNext());
+ Object obj = iter.next();
+ if (map.isOrdered()) {
+ assertEquals(i, intIter(coll, obj));
+ }
+ if (index != null) {
+ try {
+ setValuePlusOne(iter, obj);
+ fail();
+ } catch (UnsupportedOperationException e) {}
+ } else if
+ (((StoredCollection) coll).areDuplicatesOrdered()) {
+ try {
+ setValuePlusOne(iter, obj);
+ fail();
+ } catch (RuntimeException e) {
+ Exception e2 = ExceptionUnwrapper.unwrap(e);
+ assertTrue(e2.getClass().getName(),
+ e2 instanceof IllegalArgumentException ||
+ e2 instanceof DatabaseException);
+ }
+ } else {
+ setValuePlusOne(iter, obj);
+ /* Ensure iterator position is correct. */
+ if (map.isOrdered()) {
+ assertTrue(iter.hasPrevious());
+ obj = iter.previous();
+ assertEquals(i, intIter(coll, obj));
+ assertTrue(iter.hasNext());
+ obj = iter.next();
+ assertEquals(i, intIter(coll, obj));
+ }
+ }
+ }
+ assertTrue(!iter.hasNext());
+ } finally {
+ StoredIterator.close(iter);
+ }
+ }
+ });
+ }
+
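+ /**
+ * Temporarily replaces the current element's value with a different
+ * value, verifies the change through the map, then restores the original
+ * value.
+ */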
+ void setValuePlusOne(ListIterator iter, Object obj) {
+
+ if (obj instanceof Map.Entry) {
+ Map.Entry entry = (Map.Entry) obj;
+ Long key = (Long) entry.getKey();
+ Object oldVal = entry.getValue();
+ Object val = makeVal(key.intValue() + 1);
+ if (isEntityBinding) {
+ try {
+ // must fail on attempt to change the key via an entity
+ entry.setValue(val);
+ fail();
+ } catch (IllegalArgumentException e) {}
+ val = makeEntity(key.intValue(), key.intValue() + 1);
+ }
+ entry.setValue(val);
+ assertEquals(val, entry.getValue());
+ assertEquals(val, map.get(key));
+ assertTrue(map.duplicates(key).contains(val));
+ checkDupsSize(1, map.duplicates(key));
+ entry.setValue(oldVal);
+ assertEquals(oldVal, entry.getValue());
+ assertEquals(oldVal, map.get(key));
+ assertTrue(map.duplicates(key).contains(oldVal));
+ checkDupsSize(1, map.duplicates(key));
+ } else {
+ Object oldVal = obj;
+ Long key = makeKey(intVal(obj));
+ Object val = makeVal(key.intValue() + 1);
+ if (isEntityBinding) {
+ try {
+ // must fail on attempt to change the key via an entity
+ iter.set(val);
+ fail();
+ } catch (IllegalArgumentException e) {}
+ val = makeEntity(key.intValue(), key.intValue() + 1);
+ }
+ iter.set(val);
+ assertEquals(val, map.get(key));
+ assertTrue(map.duplicates(key).contains(val));
+ checkDupsSize(1, map.duplicates(key));
+ iter.set(oldVal);
+ assertEquals(oldVal, map.get(key));
+ assertTrue(map.duplicates(key).contains(oldVal));
+ checkDupsSize(1, map.duplicates(key));
+ }
+ }
+
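+ /**
+ * Checks iterator remove/set behavior on one element, then clears the
+ * entire map and verifies that all views and duplicates are empty.
+ */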
+ void removeAll()
+ throws Exception {
+
+ writeIterRunner.run(new TransactionWorker() {
+ public void doWork() {
+ assertTrue(!map.isEmpty());
+ ListIterator iter = null;
+ try {
+ if (list != null) {
+ iter = (ListIterator) iterator(list);
+ } else {
+ iter = (ListIterator) iterator(map.values());
+ }
+ iteratorSetAndRemoveNotAllowed(iter);
+
+ Object val = iter.next();
+ assertNotNull(val);
+ iter.remove();
+ iteratorSetAndRemoveNotAllowed(iter);
+
+ if (index == null) {
+ val = iter.next();
+ assertNotNull(val);
+ iter.set(val);
+
+ if (map.areDuplicatesAllowed()) {
+ iter.add(makeVal(intVal(val), intVal(val) + 1));
+ iteratorSetAndRemoveNotAllowed(iter);
+ }
+ }
+ } finally {
+ StoredIterator.close(iter);
+ }
+ map.clear();
+ assertTrue(map.isEmpty());
+ assertTrue(map.entrySet().isEmpty());
+ assertTrue(map.keySet().isEmpty());
+ assertTrue(map.values().isEmpty());
+ for (int i = beginKey; i <= endKey; i += 1) {
+ Long key = makeKey(i);
+ Object val = makeVal(i);
+ assertNull(map.get(key));
+ assertTrue(!map.duplicates(key).contains(val));
+ checkDupsSize(0, map.duplicates(key));
+ }
+ }
+ });
+ }
+
+ void clearAll()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ map.clear();
+ assertTrue(map.isEmpty());
+ }
+ });
+ }
+
+ /**
+ * Tests that removing while iterating works properly, especially when
+ * removing everything in the key range or everything from some point to
+ * the end of the range. [#15858]
+ */
+ void removeIter()
+ throws Exception {
+
+ writeIterRunner.run(new TransactionWorker() {
+ public void doWork() {
+ ListIterator iter;
+
+ /* Save contents. */
+ HashMap<Object,Object> savedMap =
+ new HashMap<Object,Object>(map);
+ assertEquals(savedMap, map);
+
+ /* Remove all moving forward. */
+ iter = (ListIterator) iterator(map.keySet());
+ try {
+ while (iter.hasNext()) {
+ assertNotNull(iter.next());
+ iter.remove();
+ }
+ assertTrue(!iter.hasNext());
+ assertTrue(!iter.hasPrevious());
+ assertTrue(map.isEmpty());
+ } finally {
+ StoredIterator.close(iter);
+ }
+
+ /* Restore contents. */
+ imap.putAll(savedMap);
+ assertEquals(savedMap, map);
+
+ /* Remove all moving backward. */
+ iter = (ListIterator) iterator(map.keySet());
+ try {
+ while (iter.hasNext()) {
+ assertNotNull(iter.next());
+ }
+ while (iter.hasPrevious()) {
+ assertNotNull(iter.previous());
+ iter.remove();
+ }
+ assertTrue(!iter.hasNext());
+ assertTrue(!iter.hasPrevious());
+ assertTrue(map.isEmpty());
+ } finally {
+ StoredIterator.close(iter);
+ }
+
+ /* Restore contents. */
+ imap.putAll(savedMap);
+ assertEquals(savedMap, map);
+
+ int first = Math.max(1, beginKey);
+ int last = Math.min(maxKey, endKey);
+
+ /* Skip N forward, remove all from that point forward. */
+ for (int readTo = first + 1; readTo <= last; readTo += 1) {
+ iter = (ListIterator) iterator(map.keySet());
+ try {
+ for (int i = first; i < readTo; i += 1) {
+ assertTrue(iter.hasNext());
+ assertNotNull(iter.next());
+ }
+ for (int i = readTo; i <= last; i += 1) {
+ assertTrue(iter.hasNext());
+ assertNotNull(iter.next());
+ iter.remove();
+ }
+ assertTrue(!iter.hasNext());
+ assertTrue(iter.hasPrevious());
+ assertEquals(readTo - first, map.size());
+ } finally {
+ StoredIterator.close(iter);
+ }
+
+ /* Restore contents. */
+ for (Map.Entry entry : savedMap.entrySet()) {
+ if (!imap.entrySet().contains(entry)) {
+ imap.put(entry.getKey(), entry.getValue());
+ }
+ }
+ assertEquals(savedMap, map);
+ }
+
+ /* Skip N backward, remove all from that point backward. */
+ for (int readTo = last - 1; readTo >= first; readTo -= 1) {
+ iter = (ListIterator) iterator(map.keySet());
+ try {
+ while (iter.hasNext()) {
+ assertNotNull(iter.next());
+ }
+ for (int i = last; i > readTo; i -= 1) {
+ assertTrue(iter.hasPrevious());
+ assertNotNull(iter.previous());
+ }
+ for (int i = readTo; i >= first; i -= 1) {
+ assertTrue(iter.hasPrevious());
+ assertNotNull(iter.previous());
+ iter.remove();
+ }
+ assertTrue(!iter.hasPrevious());
+ assertTrue(iter.hasNext());
+ assertEquals(last - readTo, map.size());
+ } finally {
+ StoredIterator.close(iter);
+ }
+
+ /* Restore contents. */
+ for (Map.Entry entry : savedMap.entrySet()) {
+ if (!imap.entrySet().contains(entry)) {
+ imap.put(entry.getKey(), entry.getValue());
+ }
+ }
+ assertEquals(savedMap, map);
+ }
+ }
+ });
+ }
+
+ void iteratorSetAndRemoveNotAllowed(ListIterator i) {
+
+ try {
+ i.remove();
+ fail();
+ } catch (IllegalStateException e) {}
+
+ if (index == null) {
+ try {
+ Object val = makeVal(1);
+ i.set(val);
+ fail();
+ } catch (IllegalStateException e) {}
+ }
+ }
+
+ void removeOdd()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ boolean toggle = false;
+ for (int i = beginKey; i <= endKey; i += 2) {
+ toggle = !toggle;
+ Long key = makeKey(i);
+ Object val = makeVal(i);
+ if (toggle) {
+ assertTrue(map.keySet().contains(key));
+ assertTrue(map.keySet().remove(key));
+ assertTrue(!map.keySet().contains(key));
+ } else {
+ assertTrue(map.containsValue(val));
+ Object oldVal = map.remove(key);
+ assertEquals(oldVal, val);
+ assertTrue(!map.containsKey(key));
+ assertTrue(!map.containsValue(val));
+ }
+ assertNull(map.get(key));
+ assertTrue(!map.duplicates(key).contains(val));
+ checkDupsSize(0, map.duplicates(key));
+ }
+ }
+ });
+ }
+
+ void removeOddEntity()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ for (int i = beginKey; i <= endKey; i += 2) {
+ Long key = makeKey(i);
+ Object val = makeVal(i);
+ assertTrue(map.values().contains(val));
+ assertTrue(map.values().remove(val));
+ assertTrue(!map.values().contains(val));
+ assertNull(map.get(key));
+ assertTrue(!map.duplicates(key).contains(val));
+ checkDupsSize(0, map.duplicates(key));
+ }
+ }
+ });
+ }
+
+ void removeOddEntry()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ for (int i = beginKey; i <= endKey; i += 2) {
+ Long key = makeKey(i);
+ Object val = mapEntry(i);
+ assertTrue(map.entrySet().contains(val));
+ assertTrue(map.entrySet().remove(val));
+ assertTrue(!map.entrySet().contains(val));
+ assertNull(map.get(key));
+ }
+ }
+ });
+ }
+
+ void removeOddIter()
+ throws Exception {
+
+ writeIterRunner.run(new TransactionWorker() {
+ public void doWork() {
+ Iterator iter = iterator(map.keySet());
+ try {
+ for (int i = beginKey; i <= endKey; i += 1) {
+ assertTrue(iter.hasNext());
+ Long key = (Long) iter.next();
+ assertNotNull(key);
+ if (map instanceof SortedMap) {
+ assertEquals(makeKey(i), key);
+ }
+ if ((key.intValue() & 1) != 0) {
+ iter.remove();
+ }
+ }
+ } finally {
+ StoredIterator.close(iter);
+ }
+ }
+ });
+ }
+
+ void removeOddList()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ for (int i = beginKey; i <= endKey; i += 2) {
+ // remove by index
+ // (with entity binding, embedded keys in the values are
+ // being changed so we can't use values for comparison)
+ int idx = (i - beginKey) / 2;
+ Object val = makeVal(i);
+ if (!isEntityBinding) {
+ assertTrue(list.contains(val));
+ assertEquals(val, list.get(idx));
+ assertEquals(idx, list.indexOf(val));
+ }
+ assertNotNull(list.get(idx));
+ if (isEntityBinding) {
+ assertNotNull(list.remove(idx));
+ } else {
+ assertTrue(list.contains(val));
+ assertEquals(val, list.remove(idx));
+ }
+ assertTrue(!list.remove(val));
+ assertTrue(!list.contains(val));
+ assertTrue(!val.equals(list.get(idx)));
+ }
+ }
+ });
+ }
+
+ void removeOddListValue()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ for (int i = beginKey; i <= endKey; i += 2) {
+ // for non-entity case remove by value
+ // (with entity binding, embedded keys in the values are
+ // being changed so we can't use values for comparison)
+ int idx = (i - beginKey) / 2;
+ Object val = makeVal(i);
+ assertTrue(list.contains(val));
+ assertEquals(val, list.get(idx));
+ assertEquals(idx, list.indexOf(val));
+ assertTrue(list.remove(val));
+ assertTrue(!list.remove(val));
+ assertTrue(!list.contains(val));
+ assertTrue(!val.equals(list.get(idx)));
+ }
+ }
+ });
+ }
+
+ void addOdd()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ // add using Map.put()
+ for (int i = beginKey; i <= endKey; i += 2) {
+ Long key = makeKey(i);
+ Object val = makeVal(i);
+ assertNull(imap.get(key));
+ assertNull(imap.put(key, val));
+ assertEquals(val, imap.get(key));
+ assertTrue(imap.duplicates(key).contains(val));
+ checkDupsSize(1, imap.duplicates(key));
+ if (isEntityBinding) {
+ assertTrue(!imap.values().add(val));
+ }
+ if (!imap.areDuplicatesAllowed()) {
+ assertEquals(val, imap.put(key, val));
+ }
+ }
+ }
+ });
+ }
+
+ void addOddEntity()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ // add using Map.values().add()
+ for (int i = beginKey; i <= endKey; i += 2) {
+ Long key = makeKey(i);
+ Object val = makeVal(i);
+ assertNull(imap.get(key));
+ assertTrue(!imap.values().contains(val));
+ assertTrue(imap.values().add(val));
+ assertEquals(val, imap.get(key));
+ assertTrue(imap.values().contains(val));
+ assertTrue(imap.duplicates(key).contains(val));
+ checkDupsSize(1, imap.duplicates(key));
+ if (isEntityBinding) {
+ assertTrue(!imap.values().add(val));
+ }
+ }
+ }
+ });
+ }
+
+ void addOddDup()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ // add using Map.duplicates().add()
+ for (int i = beginKey; i <= endKey; i += 2) {
+ Long key = makeKey(i);
+ Object val = makeVal(i);
+ assertNull(imap.get(key));
+ assertTrue(!imap.values().contains(val));
+ assertTrue(imap.duplicates(key).add(val));
+ assertEquals(val, imap.get(key));
+ assertTrue(imap.values().contains(val));
+ assertTrue(imap.duplicates(key).contains(val));
+ checkDupsSize(1, imap.duplicates(key));
+ assertTrue(!imap.duplicates(key).add(val));
+ if (isEntityBinding) {
+ assertTrue(!imap.values().add(val));
+ }
+ }
+ }
+ });
+ }
+
+ void addOddList()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ for (int i = beginKey; i <= endKey; i += 2) {
+ int idx = i - beginKey;
+ Object val = makeVal(i);
+ assertTrue(!list.contains(val));
+ assertTrue(!val.equals(list.get(idx)));
+ list.add(idx, val);
+ assertTrue(list.contains(val));
+ assertEquals(val, list.get(idx));
+ }
+ }
+ });
+ }
+
+ void addAllList()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ for (int i = beginKey; i <= endKey; i += 1) {
+ int idx = i - beginKey;
+ Object val = makeVal(i);
+ assertTrue(!list.contains(val));
+ assertTrue(list.add(val));
+ assertTrue(list.contains(val));
+ assertEquals(val, list.get(idx));
+ }
+ }
+ });
+ }
+
+ void removeAllList()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ assertTrue(!list.isEmpty());
+ list.clear();
+ assertTrue(list.isEmpty());
+ for (int i = beginKey; i <= endKey; i += 1) {
+ int idx = i - beginKey;
+ assertNull(list.get(idx));
+ }
+ }
+ });
+ }
+
+ /**
+ * Tests ConcurrentMap methods implemented by StoredMap. Starts with an
+ * empty DB and ends with an empty DB. [#16218]
+ */
+ void testConcurrentMap()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ for (int i = beginKey; i <= endKey; i += 1) {
+ Long key = makeKey(i);
+ Object val = makeVal(i);
+ Object valPlusOne = makeVal(i, i + 1);
+ assertFalse(imap.containsKey(key));
+
+ assertNull(imap.putIfAbsent(key, val));
+ assertEquals(val, imap.get(key));
+
+ assertEquals(val, imap.putIfAbsent(key, val));
+ assertEquals(val, imap.get(key));
+
+ if (!imap.areDuplicatesAllowed()) {
+ assertEquals(val, imap.replace(key, valPlusOne));
+ assertEquals(valPlusOne, imap.get(key));
+
+ assertEquals(valPlusOne, imap.replace(key, val));
+ assertEquals(val, imap.get(key));
+
+ assertFalse(imap.replace(key, valPlusOne, val));
+ assertEquals(val, imap.get(key));
+
+ assertTrue(imap.replace(key, val, valPlusOne));
+ assertEquals(valPlusOne, imap.get(key));
+
+ assertTrue(imap.replace(key, valPlusOne, val));
+ assertEquals(val, imap.get(key));
+ }
+
+ assertFalse(imap.remove(key, valPlusOne));
+ assertTrue(imap.containsKey(key));
+
+ assertTrue(imap.remove(key, val));
+ assertFalse(imap.containsKey(key));
+
+ assertNull(imap.replace(key, val));
+ assertFalse(imap.containsKey(key));
+ }
+ }
+ });
+ }
+
+ void testIterAddList()
+ throws Exception {
+
+ writeIterRunner.run(new TransactionWorker() {
+ public void doWork() {
+ ListIterator i = (ListIterator) iterator(list);
+ try {
+ assertTrue(!i.hasNext());
+ i.add(makeVal(3));
+ assertTrue(!i.hasNext());
+ assertTrue(i.hasPrevious());
+ assertEquals(3, intVal(i.previous()));
+
+ i.add(makeVal(1));
+ assertTrue(i.hasPrevious());
+ assertTrue(i.hasNext());
+ assertEquals(1, intVal(i.previous()));
+ assertTrue(i.hasNext());
+ assertEquals(1, intVal(i.next()));
+ assertTrue(i.hasNext());
+ assertEquals(3, intVal(i.next()));
+ assertEquals(3, intVal(i.previous()));
+
+ assertTrue(i.hasNext());
+ i.add(makeVal(2));
+ assertTrue(i.hasNext());
+ assertTrue(i.hasPrevious());
+ assertEquals(2, intVal(i.previous()));
+ assertTrue(i.hasNext());
+ assertEquals(2, intVal(i.next()));
+ assertTrue(i.hasNext());
+ assertEquals(3, intVal(i.next()));
+
+ assertTrue(!i.hasNext());
+ i.add(makeVal(4));
+ i.add(makeVal(5));
+ assertTrue(!i.hasNext());
+ assertEquals(5, intVal(i.previous()));
+ assertEquals(4, intVal(i.previous()));
+ assertEquals(3, intVal(i.previous()));
+ assertEquals(2, intVal(i.previous()));
+ assertEquals(1, intVal(i.previous()));
+ assertTrue(!i.hasPrevious());
+ } finally {
+ StoredIterator.close(i);
+ }
+ }
+ });
+ }
+
+ void testIterAddDuplicates()
+ throws Exception {
+
+ writeIterRunner.run(new TransactionWorker() {
+ public void doWork() {
+ assertNull(imap.put(makeKey(1), makeVal(1)));
+ ListIterator i =
+ (ListIterator) iterator(imap.duplicates(makeKey(1)));
+ try {
+ if (imap.areDuplicatesOrdered()) {
+ i.add(makeVal(1, 4));
+ i.add(makeVal(1, 2));
+ i.add(makeVal(1, 3));
+ while (i.hasPrevious()) i.previous();
+ assertEquals(1, intVal(i.next()));
+ assertEquals(2, intVal(i.next()));
+ assertEquals(3, intVal(i.next()));
+ assertEquals(4, intVal(i.next()));
+ assertTrue(!i.hasNext());
+ } else {
+ assertEquals(1, intVal(i.next()));
+ i.add(makeVal(1, 2));
+ i.add(makeVal(1, 3));
+ assertTrue(!i.hasNext());
+ assertTrue(i.hasPrevious());
+ assertEquals(3, intVal(i.previous()));
+ assertEquals(2, intVal(i.previous()));
+ assertEquals(1, intVal(i.previous()));
+ assertTrue(!i.hasPrevious());
+ i.add(makeVal(1, 4));
+ i.add(makeVal(1, 5));
+ assertTrue(i.hasNext());
+ assertEquals(5, intVal(i.previous()));
+ assertEquals(4, intVal(i.previous()));
+ assertTrue(!i.hasPrevious());
+ assertEquals(4, intVal(i.next()));
+ assertEquals(5, intVal(i.next()));
+ assertEquals(1, intVal(i.next()));
+ assertEquals(2, intVal(i.next()));
+ assertEquals(3, intVal(i.next()));
+ assertTrue(!i.hasNext());
+ }
+ } finally {
+ StoredIterator.close(i);
+ }
+ }
+ });
+ }
+
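+ /**
+ * Reads the full key range back through the map, entrySet, keySet,
+ * values and list views, including sub-set iteration and first/last
+ * checks for sorted maps.
+ */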
+ void readAll()
+ throws Exception {
+
+ readRunner.run(new TransactionWorker() {
+ public void doWork() {
+ // map
+
+ assertNotNull(map.toString());
+ for (int i = beginKey; i <= endKey; i += 1) {
+ Long key = makeKey(i);
+ Object val = map.get(key);
+ assertEquals(makeVal(i), val);
+ assertTrue(map.containsKey(key));
+ assertTrue(map.containsValue(val));
+ assertTrue(map.keySet().contains(key));
+ assertTrue(map.values().contains(val));
+ assertTrue(map.duplicates(key).contains(val));
+ checkDupsSize(1, map.duplicates(key));
+ }
+ assertNull(map.get(makeKey(-1)));
+ assertNull(map.get(makeKey(0)));
+ assertNull(map.get(makeKey(beginKey - 1)));
+ assertNull(map.get(makeKey(endKey + 1)));
+ checkDupsSize(0, map.duplicates(makeKey(-1)));
+ checkDupsSize(0, map.duplicates(makeKey(0)));
+ checkDupsSize(0, map.duplicates(makeKey(beginKey - 1)));
+ checkDupsSize(0, map.duplicates(makeKey(endKey + 1)));
+
+ // entrySet
+
+ Set set = map.entrySet();
+ assertNotNull(set.toString());
+ assertEquals(beginKey > endKey, set.isEmpty());
+ Iterator iter = iterator(set);
+ try {
+ for (int i = beginKey; i <= endKey; i += 1) {
+ assertTrue(iter.hasNext());
+ Map.Entry entry = (Map.Entry) iter.next();
+ Long key = (Long) entry.getKey();
+ Object val = entry.getValue();
+ if (map instanceof SortedMap) {
+ assertEquals(intKey(key), i);
+ }
+ assertEquals(intKey(key), intVal(val));
+ assertTrue(set.contains(entry));
+ }
+ assertTrue(!iter.hasNext());
+ } finally {
+ StoredIterator.close(iter);
+ }
+ Map.Entry[] entries =
+ (Map.Entry[]) set.toArray(new Map.Entry[0]);
+ assertNotNull(entries);
+ assertEquals(endKey - beginKey + 1, entries.length);
+ for (int i = beginKey; i <= endKey; i += 1) {
+ Map.Entry entry = entries[i - beginKey];
+ assertNotNull(entry);
+ if (map instanceof SortedMap) {
+ assertEquals(makeKey(i), entry.getKey());
+ assertEquals(makeVal(i), entry.getValue());
+ }
+ }
+ readIterator(set, iterator(set), beginKey, endKey);
+ if (smap != null) {
+ SortedSet sset = (SortedSet) set;
+ if (beginKey == 1 && endKey >= 1) {
+ readIterator(sset,
+ iterator(sset.subSet(mapEntry(1),
+ mapEntry(2))),
+ 1, 1);
+ }
+ if (beginKey <= 2 && endKey >= 2) {
+ readIterator(sset,
+ iterator(sset.subSet(mapEntry(2),
+ mapEntry(3))),
+ 2, 2);
+ }
+ if (beginKey <= endKey) {
+ readIterator(sset,
+ iterator(sset.subSet
+ (mapEntry(endKey),
+ mapEntry(endKey + 1))),
+ endKey, endKey);
+ }
+ if (isSubMap()) {
+ if (beginKey <= endKey) {
+ if (rangeType != TAIL) {
+ try {
+ sset.subSet(mapEntry(endKey + 1),
+ mapEntry(endKey + 2));
+ fail();
+ } catch (IllegalArgumentException e) {}
+ }
+ if (rangeType != HEAD) {
+ try {
+ sset.subSet(mapEntry(0),
+ mapEntry(1));
+ fail();
+ } catch (IllegalArgumentException e) {}
+ }
+ }
+ } else {
+ readIterator(sset,
+ iterator(sset.subSet
+ (mapEntry(endKey + 1),
+ mapEntry(endKey + 2))),
+ endKey, endKey - 1);
+ readIterator(sset,
+ iterator(sset.subSet(mapEntry(0),
+ mapEntry(1))),
+ 0, -1);
+ }
+ }
+
+ // keySet
+
+ set = map.keySet();
+ assertNotNull(set.toString());
+ assertEquals(beginKey > endKey, set.isEmpty());
+ iter = iterator(set);
+ try {
+ for (int i = beginKey; i <= endKey; i += 1) {
+ assertTrue(iter.hasNext());
+ Long key = (Long) iter.next();
+ assertTrue(set.contains(key));
+ Object val = map.get(key);
+ if (map instanceof SortedMap) {
+ assertEquals(key, makeKey(i));
+ }
+ assertEquals(intKey(key), intVal(val));
+ }
+ assertTrue("" + beginKey + ' ' + endKey, !iter.hasNext());
+ } finally {
+ StoredIterator.close(iter);
+ }
+ Long[] keys = (Long[]) set.toArray(new Long[0]);
+ assertNotNull(keys);
+ assertEquals(endKey - beginKey + 1, keys.length);
+ for (int i = beginKey; i <= endKey; i += 1) {
+ Long key = keys[i - beginKey];
+ assertNotNull(key);
+ if (map instanceof SortedMap) {
+ assertEquals(makeKey(i), key);
+ }
+ }
+ readIterator(set, iterator(set), beginKey, endKey);
+
+ // values
+
+ Collection coll = map.values();
+ assertNotNull(coll.toString());
+ assertEquals(beginKey > endKey, coll.isEmpty());
+ iter = iterator(coll);
+ try {
+ for (int i = beginKey; i <= endKey; i += 1) {
+ assertTrue(iter.hasNext());
+ Object val = iter.next();
+ if (map instanceof SortedMap) {
+ assertEquals(makeVal(i), val);
+ }
+ }
+ assertTrue(!iter.hasNext());
+ } finally {
+ StoredIterator.close(iter);
+ }
+ Object[] values = coll.toArray();
+ assertNotNull(values);
+ assertEquals(endKey - beginKey + 1, values.length);
+ for (int i = beginKey; i <= endKey; i += 1) {
+ Object val = values[i - beginKey];
+ assertNotNull(val);
+ if (map instanceof SortedMap) {
+ assertEquals(makeVal(i), val);
+ }
+ }
+ readIterator(coll, iterator(coll), beginKey, endKey);
+
+ // list
+
+ if (list != null) {
+ assertNotNull(list.toString());
+ assertEquals(beginKey > endKey, list.isEmpty());
+ for (int i = beginKey; i <= endKey; i += 1) {
+ int idx = i - beginKey;
+ Object val = list.get(idx);
+ assertEquals(makeVal(i), val);
+ assertTrue(list.contains(val));
+ assertEquals(idx, list.indexOf(val));
+ assertEquals(idx, list.lastIndexOf(val));
+ }
+ ListIterator li = (ListIterator) iterator(list);
+ try {
+ for (int i = beginKey; i <= endKey; i += 1) {
+ int idx = i - beginKey;
+ assertTrue(li.hasNext());
+ assertEquals(idx, li.nextIndex());
+ Object val = li.next();
+ assertEquals(makeVal(i), val);
+ assertEquals(idx, li.previousIndex());
+ }
+ assertTrue(!li.hasNext());
+ } finally {
+ StoredIterator.close(li);
+ }
+ if (beginKey < endKey) {
+ li = list.listIterator(1);
+ try {
+ for (int i = beginKey + 1; i <= endKey; i += 1) {
+ int idx = i - beginKey;
+ assertTrue(li.hasNext());
+ assertEquals(idx, li.nextIndex());
+ Object val = li.next();
+ assertEquals(makeVal(i), val);
+ assertEquals(idx, li.previousIndex());
+ }
+ assertTrue(!li.hasNext());
+ } finally {
+ StoredIterator.close(li);
+ }
+ }
+ values = list.toArray();
+ assertNotNull(values);
+ assertEquals(endKey - beginKey + 1, values.length);
+ for (int i = beginKey; i <= endKey; i += 1) {
+ Object val = values[i - beginKey];
+ assertNotNull(val);
+ assertEquals(makeVal(i), val);
+ }
+ readIterator(list, iterator(list), beginKey, endKey);
+ }
+
+ // first/last
+
+ if (smap != null) {
+ if (beginKey <= endKey &&
+ beginKey >= 1 && beginKey <= maxKey) {
+ assertEquals(makeKey(beginKey),
+ smap.firstKey());
+ assertEquals(makeKey(beginKey),
+ ((SortedSet) smap.keySet()).first());
+ Object entry = ((SortedSet) smap.entrySet()).first();
+ assertEquals(makeKey(beginKey),
+ ((Map.Entry) entry).getKey());
+ if (smap.values() instanceof SortedSet) {
+ assertEquals(makeVal(beginKey),
+ ((SortedSet) smap.values()).first());
+ }
+ } else {
+ assertNull(smap.firstKey());
+ assertNull(((SortedSet) smap.keySet()).first());
+ assertNull(((SortedSet) smap.entrySet()).first());
+ if (smap.values() instanceof SortedSet) {
+ assertNull(((SortedSet) smap.values()).first());
+ }
+ }
+ if (beginKey <= endKey &&
+ endKey >= 1 && endKey <= maxKey) {
+ assertEquals(makeKey(endKey),
+ smap.lastKey());
+ assertEquals(makeKey(endKey),
+ ((SortedSet) smap.keySet()).last());
+ Object entry = ((SortedSet) smap.entrySet()).last();
+ assertEquals(makeKey(endKey),
+ ((Map.Entry) entry).getKey());
+ if (smap.values() instanceof SortedSet) {
+ assertEquals(makeVal(endKey),
+ ((SortedSet) smap.values()).last());
+ }
+ } else {
+ assertNull(smap.lastKey());
+ assertNull(((SortedSet) smap.keySet()).last());
+ assertNull(((SortedSet) smap.entrySet()).last());
+ if (smap.values() instanceof SortedSet) {
+ assertNull(((SortedSet) smap.values()).last());
+ }
+ }
+ }
+ }
+ });
+ }
+
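+ /**
+ * Verifies that only the even keys remain, reading through the map and
+ * its entrySet, keySet and values views.
+ */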
+ void readEven()
+ throws Exception {
+
+ readRunner.run(new TransactionWorker() {
+ public void doWork() {
+ int readBegin = ((beginKey & 1) != 0) ?
+ (beginKey + 1) : beginKey;
+ int readEnd = ((endKey & 1) != 0) ? (endKey - 1) : endKey;
+ int readIncr = 2;
+
+ // map
+
+ for (int i = beginKey; i <= endKey; i += 1) {
+ Long key = makeKey(i);
+ if ((i & 1) == 0) {
+ Object val = map.get(key);
+ assertEquals(makeVal(i), val);
+ assertTrue(map.containsKey(key));
+ assertTrue(map.containsValue(val));
+ assertTrue(map.keySet().contains(key));
+ assertTrue(map.values().contains(val));
+ assertTrue(map.duplicates(key).contains(val));
+ checkDupsSize(1, map.duplicates(key));
+ } else {
+ Object val = makeVal(i);
+ assertTrue(!map.containsKey(key));
+ assertTrue(!map.containsValue(val));
+ assertTrue(!map.keySet().contains(key));
+ assertTrue(!map.values().contains(val));
+ assertTrue(!map.duplicates(key).contains(val));
+ checkDupsSize(0, map.duplicates(key));
+ }
+ }
+
+ // entrySet
+
+ Set set = map.entrySet();
+ assertEquals(beginKey > endKey, set.isEmpty());
+ Iterator iter = iterator(set);
+ try {
+ for (int i = readBegin; i <= readEnd; i += readIncr) {
+ assertTrue(iter.hasNext());
+ Map.Entry entry = (Map.Entry) iter.next();
+ Long key = (Long) entry.getKey();
+ Object val = entry.getValue();
+ if (map instanceof SortedMap) {
+ assertEquals(intKey(key), i);
+ }
+ assertEquals(intKey(key), intVal(val));
+ assertTrue(set.contains(entry));
+ }
+ assertTrue(!iter.hasNext());
+ } finally {
+ StoredIterator.close(iter);
+ }
+
+ // keySet
+
+ set = map.keySet();
+ assertEquals(beginKey > endKey, set.isEmpty());
+ iter = iterator(set);
+ try {
+ for (int i = readBegin; i <= readEnd; i += readIncr) {
+ assertTrue(iter.hasNext());
+ Long key = (Long) iter.next();
+ assertTrue(set.contains(key));
+ Object val = map.get(key);
+ if (map instanceof SortedMap) {
+ assertEquals(key, makeKey(i));
+ }
+ assertEquals(intKey(key), intVal(val));
+ }
+ assertTrue(!iter.hasNext());
+ } finally {
+ StoredIterator.close(iter);
+ }
+
+ // values
+
+ Collection coll = map.values();
+ assertEquals(beginKey > endKey, coll.isEmpty());
+ iter = iterator(coll);
+ try {
+ for (int i = readBegin; i <= readEnd; i += readIncr) {
+ assertTrue(iter.hasNext());
+ Object val = iter.next();
+ if (map instanceof SortedMap) {
+ assertEquals(makeVal(i), val);
+ }
+ }
+ assertTrue(!iter.hasNext());
+ } finally {
+ StoredIterator.close(iter);
+ }
+
+ // the list is not used, since for this method to work in general
+ // keys may not be renumbered
+
+ // first/last
+
+ if (smap != null) {
+ if (readBegin <= readEnd &&
+ readBegin >= 1 && readBegin <= maxKey) {
+ assertEquals(makeKey(readBegin),
+ smap.firstKey());
+ assertEquals(makeKey(readBegin),
+ ((SortedSet) smap.keySet()).first());
+ Object entry = ((SortedSet) smap.entrySet()).first();
+ assertEquals(makeKey(readBegin),
+ ((Map.Entry) entry).getKey());
+ if (smap.values() instanceof SortedSet) {
+ assertEquals(makeVal(readBegin),
+ ((SortedSet) smap.values()).first());
+ }
+ } else {
+ assertNull(smap.firstKey());
+ assertNull(((SortedSet) smap.keySet()).first());
+ assertNull(((SortedSet) smap.entrySet()).first());
+ if (smap.values() instanceof SortedSet) {
+ assertNull(((SortedSet) smap.values()).first());
+ }
+ }
+ if (readBegin <= readEnd &&
+ readEnd >= 1 && readEnd <= maxKey) {
+ assertEquals(makeKey(readEnd),
+ smap.lastKey());
+ assertEquals(makeKey(readEnd),
+ ((SortedSet) smap.keySet()).last());
+ Object entry = ((SortedSet) smap.entrySet()).last();
+ assertEquals(makeKey(readEnd),
+ ((Map.Entry) entry).getKey());
+ if (smap.values() instanceof SortedSet) {
+ assertEquals(makeVal(readEnd),
+ ((SortedSet) smap.values()).last());
+ }
+ } else {
+ assertNull(smap.lastKey());
+ assertNull(((SortedSet) smap.keySet()).last());
+ assertNull(((SortedSet) smap.entrySet()).last());
+ if (smap.values() instanceof SortedSet) {
+ assertNull(((SortedSet) smap.values()).last());
+ }
+ }
+ }
+ }
+ });
+ }
+
+ void readEvenList()
+ throws Exception {
+
+ readRunner.run(new TransactionWorker() {
+ public void doWork() {
+ int readBegin = ((beginKey & 1) != 0) ?
+ (beginKey + 1) : beginKey;
+ int readEnd = ((endKey & 1) != 0) ? (endKey - 1) : endKey;
+ int readIncr = 2;
+
+ assertEquals(beginKey > endKey, list.isEmpty());
+ ListIterator iter = (ListIterator) iterator(list);
+ try {
+ int idx = 0;
+ for (int i = readBegin; i <= readEnd; i += readIncr) {
+ assertTrue(iter.hasNext());
+ assertEquals(idx, iter.nextIndex());
+ Object val = iter.next();
+ assertEquals(idx, iter.previousIndex());
+ if (isEntityBinding) {
+ assertEquals(i, intVal(val));
+ } else {
+ assertEquals(makeVal(i), val);
+ }
+ idx += 1;
+ }
+ assertTrue(!iter.hasNext());
+ } finally {
+ StoredIterator.close(iter);
+ }
+ }
+ });
+ }
+
+ void readIterator(Collection coll, Iterator iter,
+ int beginValue, int endValue) {
+
+ ListIterator li = (ListIterator) iter;
+ boolean isList = (coll instanceof List);
+ Iterator clone = null;
+ try {
+ // at beginning
+ assertTrue(!li.hasPrevious());
+ assertTrue(!li.hasPrevious());
+ try { li.previous(); } catch (NoSuchElementException e) {}
+ if (isList) {
+ assertEquals(-1, li.previousIndex());
+ }
+ if (endValue < beginValue) {
+ // is empty
+ assertTrue(!iter.hasNext());
+ try { iter.next(); } catch (NoSuchElementException e) {}
+ if (isList) {
+ assertEquals(Integer.MAX_VALUE, li.nextIndex());
+ }
+ }
+ // loop thru all and collect in array
+ int[] values = new int[endValue - beginValue + 1];
+ for (int i = beginValue; i <= endValue; i += 1) {
+ assertTrue(iter.hasNext());
+ int idx = i - beginKey;
+ if (isList) {
+ assertEquals(idx, li.nextIndex());
+ }
+ int value = intIter(coll, iter.next());
+ if (isList) {
+ assertEquals(idx, li.previousIndex());
+ }
+ values[i - beginValue] = value;
+ if (((StoredCollection) coll).isOrdered()) {
+ assertEquals(i, value);
+ } else {
+ assertTrue(value >= beginValue);
+ assertTrue(value <= endValue);
+ }
+ }
+ // at end
+ assertTrue(!iter.hasNext());
+ try { iter.next(); } catch (NoSuchElementException e) {}
+ if (isList) {
+ assertEquals(Integer.MAX_VALUE, li.nextIndex());
+ }
+ // clone at same position
+ clone = StoredCollections.iterator(iter);
+ assertTrue(!clone.hasNext());
+ // loop thru in reverse
+ for (int i = endValue; i >= beginValue; i -= 1) {
+ assertTrue(li.hasPrevious());
+ int idx = i - beginKey;
+ if (isList) {
+ assertEquals(idx, li.previousIndex());
+ }
+ int value = intIter(coll, li.previous());
+ if (isList) {
+ assertEquals(idx, li.nextIndex());
+ }
+ assertEquals(values[i - beginValue], value);
+ }
+ // clone should not have changed
+ assertTrue(!clone.hasNext());
+ // at beginning
+ assertTrue(!li.hasPrevious());
+ try { li.previous(); } catch (NoSuchElementException e) {}
+ if (isList) {
+ assertEquals(-1, li.previousIndex());
+ }
+ // loop thru with some back-and-forth
+ for (int i = beginValue; i <= endValue; i += 1) {
+ assertTrue(iter.hasNext());
+ int idx = i - beginKey;
+ if (isList) {
+ assertEquals(idx, li.nextIndex());
+ }
+ Object obj = iter.next();
+ if (isList) {
+ assertEquals(idx, li.previousIndex());
+ }
+ assertEquals(obj, li.previous());
+ if (isList) {
+ assertEquals(idx, li.nextIndex());
+ }
+ assertEquals(obj, iter.next());
+ if (isList) {
+ assertEquals(idx, li.previousIndex());
+ }
+ int value = intIter(coll, obj);
+ assertEquals(values[i - beginValue], value);
+ }
+ // at end
+ assertTrue(!iter.hasNext());
+ try { iter.next(); } catch (NoSuchElementException e) {}
+ if (isList) {
+ assertEquals(Integer.MAX_VALUE, li.nextIndex());
+ }
+ } finally {
+ StoredIterator.close(iter);
+ StoredIterator.close(clone);
+ }
+ }
+
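+ /**
+ * Compares the stored map against an equivalent HashMap and exercises
+ * containsAll, addAll, removeAll and retainAll on each view.
+ */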
+ void bulkOperations()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ HashMap hmap = new HashMap();
+ for (int i = Math.max(1, beginKey);
+ i <= Math.min(maxKey, endKey);
+ i += 1) {
+ hmap.put(makeKey(i), makeVal(i));
+ }
+ assertEquals(hmap, map);
+ assertEquals(hmap.entrySet(), map.entrySet());
+ assertEquals(hmap.keySet(), map.keySet());
+ assertEquals(map.values(), hmap.values());
+
+ assertTrue(map.entrySet().containsAll(hmap.entrySet()));
+ assertTrue(map.keySet().containsAll(hmap.keySet()));
+ assertTrue(map.values().containsAll(hmap.values()));
+
+ map.clear();
+ assertTrue(map.isEmpty());
+ imap.putAll(hmap);
+ assertEquals(hmap, map);
+
+ assertTrue(map.entrySet().removeAll(hmap.entrySet()));
+ assertTrue(map.entrySet().isEmpty());
+ assertTrue(!map.entrySet().removeAll(hmap.entrySet()));
+ assertTrue(imap.entrySet().addAll(hmap.entrySet()));
+ assertTrue(map.entrySet().containsAll(hmap.entrySet()));
+ assertTrue(!imap.entrySet().addAll(hmap.entrySet()));
+ assertEquals(hmap, map);
+
+ assertTrue(!map.entrySet().retainAll(hmap.entrySet()));
+ assertEquals(hmap, map);
+ assertTrue(map.entrySet().retainAll(Collections.EMPTY_SET));
+ assertTrue(map.isEmpty());
+ imap.putAll(hmap);
+ assertEquals(hmap, map);
+
+ assertTrue(map.values().removeAll(hmap.values()));
+ assertTrue(map.values().isEmpty());
+ assertTrue(!map.values().removeAll(hmap.values()));
+ if (isEntityBinding) {
+ assertTrue(imap.values().addAll(hmap.values()));
+ assertTrue(map.values().containsAll(hmap.values()));
+ assertTrue(!imap.values().addAll(hmap.values()));
+ } else {
+ imap.putAll(hmap);
+ }
+ assertEquals(hmap, map);
+
+ assertTrue(!map.values().retainAll(hmap.values()));
+ assertEquals(hmap, map);
+ assertTrue(map.values().retainAll(Collections.EMPTY_SET));
+ assertTrue(map.isEmpty());
+ imap.putAll(hmap);
+ assertEquals(hmap, map);
+
+ assertTrue(map.keySet().removeAll(hmap.keySet()));
+ assertTrue(map.keySet().isEmpty());
+ assertTrue(!map.keySet().removeAll(hmap.keySet()));
+ assertTrue(imap.keySet().addAll(hmap.keySet()));
+ assertTrue(imap.keySet().containsAll(hmap.keySet()));
+ if (index != null) {
+ assertTrue(map.keySet().isEmpty());
+ }
+ assertTrue(!imap.keySet().addAll(hmap.keySet()));
+ // restore values to non-null
+ imap.keySet().removeAll(hmap.keySet());
+ imap.putAll(hmap);
+ assertEquals(hmap, map);
+
+ assertTrue(!map.keySet().retainAll(hmap.keySet()));
+ assertEquals(hmap, map);
+ assertTrue(map.keySet().retainAll(Collections.EMPTY_SET));
+ assertTrue(map.isEmpty());
+ imap.putAll(hmap);
+ assertEquals(hmap, map);
+ }
+ });
+ }
+
+ void bulkListOperations()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() {
+ ArrayList alist = new ArrayList();
+ for (int i = beginKey; i <= endKey; i += 1) {
+ alist.add(makeVal(i));
+ }
+
+ assertEquals(alist, list);
+ assertTrue(list.containsAll(alist));
+
+ if (isListAddAllowed()) {
+ list.clear();
+ assertTrue(list.isEmpty());
+ assertTrue(ilist.addAll(alist));
+ assertEquals(alist, list);
+ }
+
+ assertTrue(!list.retainAll(alist));
+ assertEquals(alist, list);
+
+ if (isListAddAllowed()) {
+ assertTrue(list.retainAll(Collections.EMPTY_SET));
+ assertTrue(list.isEmpty());
+ assertTrue(ilist.addAll(alist));
+ assertEquals(alist, list);
+ }
+
+ if (isListAddAllowed() && !isEntityBinding) {
+ // deleting in a renumbered list with entity binding will
+ // change the values dynamically, making it very difficult
+ // to test
+ assertTrue(list.removeAll(alist));
+ assertTrue(list.isEmpty());
+ assertTrue(!list.removeAll(alist));
+ assertTrue(ilist.addAll(alist));
+ assertTrue(list.containsAll(alist));
+ assertEquals(alist, list);
+ }
+
+ if (isListAddAllowed() && !isEntityBinding) {
+ // addAll at an index is also very difficult to test with
+ // an entity binding
+
+ // addAll at first index
+ ilist.addAll(beginKey, alist);
+ assertTrue(list.containsAll(alist));
+ assertEquals(2 * alist.size(), countElements(list));
+ for (int i = beginKey; i <= endKey; i += 1)
+ ilist.remove(beginKey);
+ assertEquals(alist, list);
+
+ // addAll at last index
+ ilist.addAll(endKey, alist);
+ assertTrue(list.containsAll(alist));
+ assertEquals(2 * alist.size(), countElements(list));
+ for (int i = beginKey; i <= endKey; i += 1)
+ ilist.remove(endKey);
+ assertEquals(alist, list);
+
+ // addAll in the middle
+ ilist.addAll(endKey - 1, alist);
+ assertTrue(list.containsAll(alist));
+ assertEquals(2 * alist.size(), countElements(list));
+ for (int i = beginKey; i <= endKey; i += 1)
+ ilist.remove(endKey - 1);
+ assertEquals(alist, list);
+ }
+ }
+ });
+ }
+
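+ /**
+ * Restricts the collections to a submap/headmap/tailmap range, runs the
+ * standard read/write checks within that range, and then restores the
+ * full range.
+ */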
+ void readWriteRange(final int type, final int rangeBegin,
+ final int rangeEnd)
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() throws Exception {
+ setRange(type, rangeBegin, rangeEnd);
+ createOutOfRange(rangeBegin, rangeEnd);
+ if (rangeType != TAIL) {
+ writeOutOfRange(new Long(rangeEnd + 1));
+ }
+ if (rangeType != HEAD) {
+ writeOutOfRange(new Long(rangeBegin - 1));
+ }
+ if (rangeBegin <= rangeEnd) {
+ updateAll();
+ }
+ if (rangeBegin < rangeEnd && !map.areKeysRenumbered()) {
+ bulkOperations();
+ removeIter();
+ }
+ readAll();
+ clearRange();
+ }
+ });
+ }
+
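+ /**
+ * Saves the current collections and replaces them with SUB, HEAD or TAIL
+ * views over the requested range, adjusting beginKey/endKey to match.
+ */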
+ void setRange(int type, int rangeBegin, int rangeEnd) {
+
+ rangeType = type;
+ saveMap = map;
+ saveSMap = smap;
+ saveList = list;
+ int listBegin = rangeBegin - beginKey;
+ boolean canMakeSubList = (list != null && listBegin >= 0);
+ if (!canMakeSubList) {
+ list = null;
+ }
+ if (list != null) {
+ try {
+ list.subList(-1, 0);
+ fail();
+ } catch (IndexOutOfBoundsException e) { }
+ }
+ switch (type) {
+
+ case SUB:
+ smap = (StoredSortedMap) smap.subMap(makeKey(rangeBegin),
+ makeKey(rangeEnd + 1));
+ if (canMakeSubList) {
+ list = (StoredList) list.subList(listBegin,
+ rangeEnd + 1 - beginKey);
+ }
+ // check for equivalent ranges
+ assertEquals(smap,
+ (saveSMap).subMap(
+ makeKey(rangeBegin), true,
+ makeKey(rangeEnd + 1), false));
+ assertEquals(smap.entrySet(),
+ ((StoredSortedEntrySet) saveSMap.entrySet()).subSet(
+ mapEntry(rangeBegin), true,
+ mapEntry(rangeEnd + 1), false));
+ assertEquals(smap.keySet(),
+ ((StoredSortedKeySet) saveSMap.keySet()).subSet(
+ makeKey(rangeBegin), true,
+ makeKey(rangeEnd + 1), false));
+ if (smap.values() instanceof SortedSet) {
+ assertEquals(smap.values(),
+ ((StoredSortedValueSet) saveSMap.values()).subSet(
+ makeVal(rangeBegin), true,
+ makeVal(rangeEnd + 1), false));
+ }
+ break;
+ case HEAD:
+ smap = (StoredSortedMap) smap.headMap(makeKey(rangeEnd + 1));
+ if (canMakeSubList) {
+ list = (StoredList) list.subList(0,
+ rangeEnd + 1 - beginKey);
+ }
+ // check for equivalent ranges
+ assertEquals(smap,
+ (saveSMap).headMap(
+ makeKey(rangeEnd + 1), false));
+ assertEquals(smap.entrySet(),
+ ((StoredSortedEntrySet) saveSMap.entrySet()).headSet(
+ mapEntry(rangeEnd + 1), false));
+ assertEquals(smap.keySet(),
+ ((StoredSortedKeySet) saveSMap.keySet()).headSet(
+ makeKey(rangeEnd + 1), false));
+ if (smap.values() instanceof SortedSet) {
+ assertEquals(smap.values(),
+ ((StoredSortedValueSet) saveSMap.values()).headSet(
+ makeVal(rangeEnd + 1), false));
+ }
+ break;
+ case TAIL:
+ smap = (StoredSortedMap) smap.tailMap(makeKey(rangeBegin));
+ if (canMakeSubList) {
+ list = (StoredList) list.subList(listBegin,
+ maxKey + 1 - beginKey);
+ }
+ // check for equivalent ranges
+ assertEquals(smap,
+ (saveSMap).tailMap(
+ makeKey(rangeBegin), true));
+ assertEquals(smap.entrySet(),
+ ((StoredSortedEntrySet) saveSMap.entrySet()).tailSet(
+ mapEntry(rangeBegin), true));
+ assertEquals(smap.keySet(),
+ ((StoredSortedKeySet) saveSMap.keySet()).tailSet(
+ makeKey(rangeBegin), true));
+ if (smap.values() instanceof SortedSet) {
+ assertEquals(smap.values(),
+ ((StoredSortedValueSet) saveSMap.values()).tailSet(
+ makeVal(rangeBegin), true));
+ }
+ break;
+ default: throw new RuntimeException();
+ }
+ map = smap;
+ beginKey = rangeBegin;
+ if (rangeBegin < 1 || rangeEnd > maxKey) {
+ endKey = rangeBegin - 1; // force empty range for readAll()
+ } else {
+ endKey = rangeEnd;
+ }
+ }
+
+ void clearRange() {
+
+ rangeType = NONE;
+ beginKey = 1;
+ endKey = maxKey;
+ map = saveMap;
+ smap = saveSMap;
+ list = saveList;
+ }
+
+ void createOutOfRange(int rangeBegin, int rangeEnd) {
+ // map
+
+ if (rangeType != TAIL) {
+ try {
+ smap.subMap(makeKey(rangeBegin), makeKey(rangeEnd + 2));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ try {
+ smap.headMap(makeKey(rangeEnd + 2));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ checkDupsSize(0, smap.duplicates(makeKey(rangeEnd + 2)));
+ }
+ if (rangeType != HEAD) {
+ try {
+ smap.subMap(makeKey(rangeBegin - 1), makeKey(rangeEnd + 1));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ try {
+ smap.tailMap(makeKey(rangeBegin - 1));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ checkDupsSize(0, smap.duplicates(makeKey(rangeBegin - 1)));
+ }
+
+ // keySet
+
+ if (rangeType != TAIL) {
+ SortedSet sset = (SortedSet) map.keySet();
+ try {
+ sset.subSet(makeKey(rangeBegin), makeKey(rangeEnd + 2));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ try {
+ sset.headSet(makeKey(rangeEnd + 2));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ try {
+ iterator(sset.subSet(makeKey(rangeEnd + 1),
+ makeKey(rangeEnd + 2)));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ }
+ if (rangeType != HEAD) {
+ SortedSet sset = (SortedSet) map.keySet();
+ try {
+ sset.subSet(makeKey(rangeBegin - 1), makeKey(rangeEnd + 1));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ try {
+ sset.tailSet(makeKey(rangeBegin - 1));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ try {
+ iterator(sset.subSet(makeKey(rangeBegin - 1),
+ makeKey(rangeBegin)));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ }
+
+ // entrySet
+
+ if (rangeType != TAIL) {
+ SortedSet sset = (SortedSet) map.entrySet();
+ try {
+ sset.subSet(mapEntry(rangeBegin), mapEntry(rangeEnd + 2));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ try {
+ sset.headSet(mapEntry(rangeEnd + 2));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ try {
+ iterator(sset.subSet(mapEntry(rangeEnd + 1),
+ mapEntry(rangeEnd + 2)));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ }
+ if (rangeType != HEAD) {
+ SortedSet sset = (SortedSet) map.entrySet();
+ try {
+ sset.subSet(mapEntry(rangeBegin - 1), mapEntry(rangeEnd + 1));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ try {
+ sset.tailSet(mapEntry(rangeBegin - 1));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ try {
+ iterator(sset.subSet(mapEntry(rangeBegin - 1),
+ mapEntry(rangeBegin)));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ }
+
+ // values
+
+ if (map.values() instanceof SortedSet) {
+ SortedSet sset = (SortedSet) map.values();
+ if (rangeType != TAIL) {
+ try {
+ sset.subSet(makeVal(rangeBegin),
+ makeVal(rangeEnd + 2));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ try {
+ sset.headSet(makeVal(rangeEnd + 2));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ }
+ if (rangeType != HEAD) {
+ try {
+ sset.subSet(makeVal(rangeBegin - 1),
+ makeVal(rangeEnd + 1));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ try {
+ sset.tailSet(makeVal(rangeBegin - 1));
+ fail();
+ } catch (IllegalArgumentException e) { }
+ }
+ }
+
+ // list
+
+ if (list != null) {
+ int size = rangeEnd - rangeBegin + 1;
+ try {
+ list.subList(0, size + 1);
+ fail();
+ } catch (IndexOutOfBoundsException e) { }
+ try {
+ list.subList(-1, size);
+ fail();
+ } catch (IndexOutOfBoundsException e) { }
+ try {
+ list.subList(2, 1);
+ fail();
+ } catch (IndexOutOfBoundsException e) { }
+ try {
+ list.subList(size, size);
+ fail();
+ } catch (IndexOutOfBoundsException e) { }
+ }
+ }
+
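+ /**
+ * Verifies that writing a key outside the current range fails with
+ * IllegalArgumentException (or UnsupportedOperationException for
+ * index-backed views).
+ */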
+ void writeOutOfRange(Long badNewKey) {
+ try {
+ map.put(badNewKey, makeVal(badNewKey));
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertTrue(e.toString(), index == null);
+ } catch (UnsupportedOperationException e) {
+ assertTrue(index != null);
+ }
+ try {
+ map.keySet().add(badNewKey);
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertTrue(index == null);
+ } catch (UnsupportedOperationException e) {
+ assertTrue(index != null);
+ }
+ try {
+ map.values().add(makeEntity(badNewKey));
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertTrue(isEntityBinding && index == null);
+ } catch (UnsupportedOperationException e) {
+ assertTrue(!(isEntityBinding && index == null));
+ }
+ if (list != null) {
+ int i = badNewKey.intValue() - beginKey;
+ try {
+ list.set(i, makeVal(i));
+ fail();
+ } catch (IndexOutOfBoundsException e) {
+ assertTrue(index == null);
+ } catch (UnsupportedOperationException e) {
+ assertTrue(index != null);
+ }
+ try {
+ list.add(i, makeVal(badNewKey));
+ fail();
+ } catch (UnsupportedOperationException e) {
+ }
+ }
+ }
+
+ void readWriteDuplicates()
+ throws Exception {
+
+ writeRunner.run(new TransactionWorker() {
+ public void doWork() throws Exception {
+ if (index == null) {
+ readWritePrimaryDuplicates(beginKey);
+ readWritePrimaryDuplicates(beginKey + 1);
+ readWritePrimaryDuplicates(endKey);
+ readWritePrimaryDuplicates(endKey - 1);
+ } else {
+ readWriteIndexedDuplicates(beginKey);
+ readWriteIndexedDuplicates(beginKey + 1);
+ readWriteIndexedDuplicates(endKey);
+ readWriteIndexedDuplicates(endKey - 1);
+ }
+ }
+ });
+ }
+
+ void readWritePrimaryDuplicates(int i)
+ throws Exception {
+
+ Collection dups;
+ // make duplicate values
+ final Long key = makeKey(i);
+ final Object[] values = new Object[5];
+ for (int j = 0; j < values.length; j += 1) {
+ values[j] = isEntityBinding
+ ? makeEntity(i, i + j)
+ : makeVal(i + j);
+ }
+ // add duplicates
+ outerLoop: for (int writeMode = 0;; writeMode += 1) {
+ //System.out.println("write mode " + writeMode);
+ switch (writeMode) {
+ case 0:
+ case 1: {
+ // write with Map.put()
+ for (int j = 1; j < values.length; j += 1) {
+ map.put(key, values[j]);
+ }
+ break;
+ }
+ case 2: {
+ // write with Map.duplicates().add()
+ dups = map.duplicates(key);
+ for (int j = 1; j < values.length; j += 1) {
+ dups.add(values[j]);
+ }
+ break;
+ }
+ case 3: {
+ // write with Map.duplicates().iterator().add()
+ writeIterRunner.run(new TransactionWorker() {
+ public void doWork() {
+ Collection dups = map.duplicates(key);
+ Iterator iter = iterator(dups);
+ assertEquals(values[0], iter.next());
+ assertTrue(!iter.hasNext());
+ try {
+ for (int j = 1; j < values.length; j += 1) {
+ ((ListIterator) iter).add(values[j]);
+ }
+ } finally {
+ StoredIterator.close(iter);
+ }
+ }
+ });
+ break;
+ }
+ case 4: {
+ // write with Map.values().add()
+ if (!isEntityBinding) {
+ continue;
+ }
+ Collection set = map.values();
+ for (int j = 1; j < values.length; j += 1) {
+ set.add(values[j]);
+ }
+ break;
+ }
+ default: {
+ break outerLoop;
+ }
+ }
+ checkDupsSize(values.length, map.duplicates(key));
+ // read duplicates
+ readDuplicates(i, key, values);
+ // remove duplicates
+ switch (writeMode) {
+ case 0: {
+ // remove with Map.remove()
+ checkDupsSize(values.length, map.duplicates(key));
+ map.remove(key); // remove all values
+ checkDupsSize(0, map.duplicates(key));
+ map.put(key, values[0]); // put back original value
+ checkDupsSize(1, map.duplicates(key));
+ break;
+ }
+ case 1: {
+ // remove with Map.keySet().remove()
+ map.keySet().remove(key); // remove all values
+ map.put(key, values[0]); // put back original value
+ break;
+ }
+ case 2: {
+ // remove with Map.duplicates().clear()
+ dups = map.duplicates(key);
+ dups.clear(); // remove all values
+ dups.add(values[0]); // put back original value
+ break;
+ }
+ case 3: {
+ // remove with Map.duplicates().iterator().remove()
+ writeIterRunner.run(new TransactionWorker() {
+ public void doWork() {
+ Collection dups = map.duplicates(key);
+ Iterator iter = iterator(dups);
+ try {
+ for (int j = 0; j < values.length; j += 1) {
+ assertEquals(values[j], iter.next());
+ if (j != 0) {
+ iter.remove();
+ }
+ }
+ } finally {
+ StoredIterator.close(iter);
+ }
+ }
+ });
+ break;
+ }
+ case 4: {
+ // remove with Map.values().remove()
+ if (!isEntityBinding) {
+ throw new IllegalStateException();
+ }
+ Collection set = map.values();
+ for (int j = 1; j < values.length; j += 1) {
+ set.remove(values[j]);
+ }
+ break;
+ }
+ default: throw new IllegalStateException();
+ }
+ // verify that only the original value is present
+ dups = map.duplicates(key);
+ assertTrue(dups.contains(values[0]));
+ for (int j = 1; j < values.length; j += 1) {
+ assertTrue(!dups.contains(values[j]));
+ }
+ checkDupsSize(1, dups);
+ }
+ }
+
+ void readWriteIndexedDuplicates(int i) {
+ Object key = makeKey(i);
+ Object[] values = new Object[3];
+ values[0] = makeVal(i);
+ for (int j = 1; j < values.length; j += 1) {
+ values[j] = isEntityBinding
+ ? makeEntity(endKey + j, i)
+ : makeVal(i);
+ }
+ // add duplicates
+ for (int j = 1; j < values.length; j += 1) {
+ imap.put(makeKey(endKey + j), values[j]);
+ }
+ // read duplicates
+ readDuplicates(i, key, values);
+ // remove duplicates
+ for (int j = 1; j < values.length; j += 1) {
+ imap.remove(makeKey(endKey + j));
+ }
+ checkDupsSize(1, map.duplicates(key));
+ }
+
+ void readDuplicates(int i, Object key, Object[] values) {
+
+ boolean isOrdered = map.isOrdered();
+ Collection dups;
+ Iterator iter;
+ // read with Map.duplicates().iterator()
+ dups = map.duplicates(key);
+ checkDupsSize(values.length, dups);
+ iter = iterator(dups);
+ try {
+ for (int j = 0; j < values.length; j += 1) {
+ assertTrue(iter.hasNext());
+ Object val = iter.next();
+ assertEquals(values[j], val);
+ }
+ assertTrue(!iter.hasNext());
+ } finally {
+ StoredIterator.close(iter);
+ }
+ // read with Map.values().iterator()
+ Collection clone = ((StoredCollection) map.values()).toList();
+ iter = iterator(map.values());
+ try {
+ for (int j = beginKey; j < i; j += 1) {
+ Object val = iter.next();
+ assertTrue(clone.remove(makeVal(j)));
+ if (isOrdered) {
+ assertEquals(makeVal(j), val);
+ }
+ }
+ for (int j = 0; j < values.length; j += 1) {
+ Object val = iter.next();
+ assertTrue(clone.remove(values[j]));
+ if (isOrdered) {
+ assertEquals(values[j], val);
+ }
+ }
+ for (int j = i + 1; j <= endKey; j += 1) {
+ Object val = iter.next();
+ assertTrue(clone.remove(makeVal(j)));
+ if (isOrdered) {
+ assertEquals(makeVal(j), val);
+ }
+ }
+ assertTrue(!iter.hasNext());
+ assertTrue(clone.isEmpty());
+ } finally {
+ StoredIterator.close(iter);
+ }
+ // read with Map.entrySet().iterator()
+ clone = ((StoredCollection) map.entrySet()).toList();
+ iter = iterator(map.entrySet());
+ try {
+ for (int j = beginKey; j < i; j += 1) {
+ Map.Entry entry = (Map.Entry) iter.next();
+ assertTrue(clone.remove(mapEntry(j)));
+ if (isOrdered) {
+ assertEquals(makeVal(j), entry.getValue());
+ assertEquals(makeKey(j), entry.getKey());
+ }
+ }
+ for (int j = 0; j < values.length; j += 1) {
+ Map.Entry entry = (Map.Entry) iter.next();
+ assertTrue(clone.remove(mapEntry(makeKey(i), values[j])));
+ if (isOrdered) {
+ assertEquals(values[j], entry.getValue());
+ assertEquals(makeKey(i), entry.getKey());
+ }
+ }
+ for (int j = i + 1; j <= endKey; j += 1) {
+ Map.Entry entry = (Map.Entry) iter.next();
+ assertTrue(clone.remove(mapEntry(j)));
+ if (isOrdered) {
+ assertEquals(makeVal(j), entry.getValue());
+ assertEquals(makeKey(j), entry.getKey());
+ }
+ }
+ assertTrue(!iter.hasNext());
+ assertTrue(clone.isEmpty());
+ } finally {
+ StoredIterator.close(iter);
+ }
+ // read with Map.keySet().iterator()
+ clone = ((StoredCollection) map.keySet()).toList();
+ iter = iterator(map.keySet());
+ try {
+ for (int j = beginKey; j < i; j += 1) {
+ Object val = iter.next();
+ assertTrue(clone.remove(makeKey(j)));
+ if (isOrdered) {
+ assertEquals(makeKey(j), val);
+ }
+ }
+ if (true) {
+ // only one key is iterated for all duplicates
+ Object val = iter.next();
+ assertTrue(clone.remove(makeKey(i)));
+ if (isOrdered) {
+ assertEquals(makeKey(i), val);
+ }
+ }
+ for (int j = i + 1; j <= endKey; j += 1) {
+ Object val = iter.next();
+ assertTrue(clone.remove(makeKey(j)));
+ if (isOrdered) {
+ assertEquals(makeKey(j), val);
+ }
+ }
+ assertTrue(!iter.hasNext());
+ assertTrue(clone.isEmpty());
+ } finally {
+ StoredIterator.close(iter);
+ }
+ }
+
+ void duplicatesNotAllowed() {
+
+ Collection dups = map.duplicates(makeKey(beginKey));
+ try {
+ dups.add(makeVal(beginKey));
+ fail();
+ } catch (UnsupportedOperationException expected) { }
+ ListIterator iter = (ListIterator) iterator(dups);
+ try {
+ iter.add(makeVal(beginKey));
+ fail();
+ } catch (UnsupportedOperationException expected) {
+ } finally {
+ StoredIterator.close(iter);
+ }
+ }
+
+ void listOperationsNotAllowed() {
+
+ ListIterator iter = (ListIterator) iterator(map.values());
+ try {
+ try {
+ iter.nextIndex();
+ fail();
+ } catch (UnsupportedOperationException expected) { }
+ try {
+ iter.previousIndex();
+ fail();
+ } catch (UnsupportedOperationException expected) { }
+ } finally {
+ StoredIterator.close(iter);
+ }
+ }
+
+ void testCdbLocking() {
+
+ Iterator readIterator;
+ Iterator writeIterator;
+ StoredKeySet set = (StoredKeySet) map.keySet();
+
+ // can open two CDB read cursors
+ readIterator = set.storedIterator(false);
+ try {
+ Iterator readIterator2 = set.storedIterator(false);
+ StoredIterator.close(readIterator2);
+ } finally {
+ StoredIterator.close(readIterator);
+ }
+
+ // can open two CDB write cursors
+ writeIterator = set.storedIterator(true);
+ try {
+ Iterator writeIterator2 = set.storedIterator(true);
+ StoredIterator.close(writeIterator2);
+ } finally {
+ StoredIterator.close(writeIterator);
+ }
+
+ // cannot open a CDB write cursor while a read cursor is open
+ readIterator = set.storedIterator(false);
+ try {
+ writeIterator = set.storedIterator(true);
+ fail();
+ StoredIterator.close(writeIterator);
+ } catch (IllegalStateException e) {
+ } finally {
+ StoredIterator.close(readIterator);
+ }
+
+ if (index == null) {
+ // cannot put() with read cursor open
+ readIterator = set.storedIterator(false);
+ try {
+ map.put(makeKey(1), makeVal(1));
+ fail();
+ } catch (IllegalStateException e) {
+ } finally {
+ StoredIterator.close(readIterator);
+ }
+
+ // append() with a write cursor open fails only for RECNO/QUEUE
+ writeIterator = set.storedIterator(true);
+ try {
+ if (testStore.isQueueOrRecno()) {
+ try {
+ map.append(makeVal(1));
+ fail();
+ } catch (IllegalStateException e) {}
+ } else {
+ map.append(makeVal(1));
+ }
+ } finally {
+ StoredIterator.close(writeIterator);
+ }
+ }
+ }
+
+ Object makeVal(int key) {
+
+ if (isEntityBinding) {
+ return makeEntity(key);
+ } else {
+ return new Long(key + 100);
+ }
+ }
+
+ Object makeVal(int key, int val) {
+
+ if (isEntityBinding) {
+ return makeEntity(key, val);
+ } else {
+ return makeVal(val);
+ }
+ }
+
+ Object makeEntity(int key, int val) {
+
+ return new TestEntity(key, val + 100);
+ }
+
+ int intVal(Object val) {
+
+ if (isEntityBinding) {
+ return ((TestEntity) val).value - 100;
+ } else {
+ return ((Long) val).intValue() - 100;
+ }
+ }
+
+ int intKey(Object key) {
+
+ return ((Long) key).intValue();
+ }
+
+ Object makeVal(Long key) {
+
+ return makeVal(key.intValue());
+ }
+
+ Object makeEntity(int key) {
+
+ return makeEntity(key, key);
+ }
+
+ Object makeEntity(Long key) {
+
+ return makeEntity(key.intValue());
+ }
+
+ int intIter(Collection coll, Object value) {
+
+ if (coll instanceof StoredKeySet) {
+ return intKey(value);
+ } else {
+ if (coll instanceof StoredEntrySet) {
+ value = ((Map.Entry) value).getValue();
+ }
+ return intVal(value);
+ }
+ }
+
+ Map.Entry mapEntry(Object key, Object val) {
+
+ return new MapEntryParameter(key, val);
+ }
+
+ Map.Entry mapEntry(int key) {
+
+ return new MapEntryParameter(makeKey(key), makeVal(key));
+ }
+
+ Long makeKey(int key) {
+
+ return new Long(key);
+ }
+
+ boolean isSubMap() {
+
+ return rangeType != NONE;
+ }
+
+ void checkDupsSize(int expected, Collection coll) {
+
+ assertEquals(expected, coll.size());
+ if (coll instanceof StoredCollection) {
+ StoredIterator i = ((StoredCollection) coll).storedIterator(false);
+ try {
+ int actual = 0;
+ if (i.hasNext()) {
+ i.next();
+ actual = i.count();
+ }
+ assertEquals(expected, actual);
+ } finally {
+ StoredIterator.close(i);
+ }
+ }
+ }
+
+ private boolean isListAddAllowed() {
+
+ return list != null && testStore.isQueueOrRecno() &&
+ list.areKeysRenumbered();
+ }
+
+ private int countElements(Collection coll) {
+
+ int count = 0;
+ Iterator iter = iterator(coll);
+ try {
+ while (iter.hasNext()) {
+ iter.next();
+ count += 1;
+ }
+ } finally {
+ StoredIterator.close(iter);
+ }
+ return count;
+ }
+}
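The read and remove loops above all follow one pattern: fetch the duplicates view for a key, walk it with a stored iterator, and release the underlying cursor in a finally block so it is closed even when an assertion fails. A minimal stand-alone sketch of that pattern, mirroring checkDupsSize(); the class and method names are illustrative and not part of the test suite:

import java.util.Collection;

import com.sleepycat.collections.StoredCollection;
import com.sleepycat.collections.StoredIterator;
import com.sleepycat.collections.StoredMap;

class DuplicatesReadSketch {

    // Illustrative sketch, not part of the original test sources.
    // Counts the values stored under a single key, releasing the underlying
    // cursor in a finally block exactly as checkDupsSize() does above.
    static int countDuplicates(StoredMap map, Object key) {
        Collection dups = map.duplicates(key);
        if (!(dups instanceof StoredCollection)) {
            return dups.size(); // plain collection: no cursor to manage
        }
        StoredIterator iter = ((StoredCollection) dups).storedIterator(false);
        try {
            int n = 0;
            while (iter.hasNext()) {
                iter.next();
                n += 1;
            }
            return n;
        } finally {
            StoredIterator.close(iter);
        }
    }
}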
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/DbTestUtil.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/DbTestUtil.java
new file mode 100644
index 0000000..584982c
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/DbTestUtil.java
@@ -0,0 +1,129 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.db.DatabaseConfig;
+
+/**
+ * @author Mark Hayes
+ */
+public class DbTestUtil {
+
+ public static final DatabaseConfig DBCONFIG_CREATE = new DatabaseConfig();
+ static {
+ DBCONFIG_CREATE.setAllowCreate(true);
+ }
+
+ private static final File TEST_DIR;
+ static {
+ String dir = System.getProperty("testdestdir");
+ if (dir == null || dir.length() == 0) {
+ dir = ".";
+ }
+ TEST_DIR = new File(dir, "tmp");
+ }
+
+ public static void printTestName(String name) {
+ // don't want verbose printing for now
+ // System.out.println(name);
+ }
+
+ public static File getExistingDir(String name)
+ throws IOException {
+
+ File dir = new File(TEST_DIR, name);
+ if (!dir.exists() || !dir.isDirectory()) {
+ throw new IllegalStateException(
+ "Not an existing directory: " + dir);
+ }
+ return dir;
+ }
+
+ public static File getNewDir()
+ throws IOException {
+
+ return getNewDir("test-dir");
+ }
+
+ public static File getNewDir(String name)
+ throws IOException {
+
+ File dir = new File(TEST_DIR, name);
+ if (dir.isDirectory()) {
+ String[] files = dir.list();
+ if (files != null) {
+ for (int i = 0; i < files.length; i += 1) {
+ new File(dir, files[i]).delete();
+ }
+ }
+ } else {
+ dir.delete();
+ dir.mkdirs();
+ }
+ return dir;
+ }
+
+ public static File getNewFile()
+ throws IOException {
+
+ return getNewFile("test-file");
+ }
+
+ public static File getNewFile(String name)
+ throws IOException {
+
+ return getNewFile(TEST_DIR, name);
+ }
+
+ public static File getNewFile(File dir, String name)
+ throws IOException {
+
+ File file = new File(dir, name);
+ file.delete();
+ return file;
+ }
+
+ public static boolean copyResource(Class cls, String fileName, File toDir)
+ throws IOException {
+
+ InputStream in = cls.getResourceAsStream("testdata/" + fileName);
+ if (in == null) {
+ return false;
+ }
+ in = new BufferedInputStream(in);
+ File file = new File(toDir, fileName);
+ OutputStream out = new FileOutputStream(file);
+ out = new BufferedOutputStream(out);
+ int c;
+ while ((c = in.read()) >= 0) out.write(c);
+ in.close();
+ out.close();
+ return true;
+ }
+
+ public static String qualifiedTestName(TestCase test) {
+
+ String s = test.getClass().getName();
+ int i = s.lastIndexOf('.');
+ if (i >= 0) {
+ s = s.substring(i + 1);
+ }
+ return s + '.' + test.getName();
+ }
+}
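A short usage sketch for the helpers above, assuming it compiles in the same package as DbTestUtil; the main method and output are illustrative only, and the directory layout is whatever the testdestdir property selects:

import java.io.File;

class DbTestUtilUsageSketch {

    // Illustrative sketch, not part of the original test sources.
    public static void main(String[] args) throws Exception {
        // <testdestdir>/tmp/test-dir, emptied of any files left by earlier runs.
        File dir = DbTestUtil.getNewDir();
        // <testdestdir>/tmp/test-file, deleted first if it already exists.
        File file = DbTestUtil.getNewFile(dir, "test-file");
        System.out.println("dir=" + dir + " file=" + file);
    }
}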
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/ForeignKeyTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/ForeignKeyTest.java
new file mode 100644
index 0000000..8dc2759
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/ForeignKeyTest.java
@@ -0,0 +1,342 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import java.util.Map;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator;
+import com.sleepycat.bind.serial.test.MarshalledObject;
+import com.sleepycat.collections.CurrentTransaction;
+import com.sleepycat.collections.TupleSerialFactory;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.ForeignKeyDeleteAction;
+import com.sleepycat.db.SecondaryConfig;
+import com.sleepycat.db.SecondaryDatabase;
+import com.sleepycat.util.ExceptionUnwrapper;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class ForeignKeyTest extends TestCase {
+
+ private static final ForeignKeyDeleteAction[] ACTIONS = {
+ ForeignKeyDeleteAction.ABORT,
+ ForeignKeyDeleteAction.NULLIFY,
+ ForeignKeyDeleteAction.CASCADE,
+ };
+ private static final String[] ACTION_LABELS = {
+ "ABORT",
+ "NULLIFY",
+ "CASCADE",
+ };
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite();
+ for (int i = 0; i < TestEnv.ALL.length; i += 1) {
+ for (int j = 0; j < ACTIONS.length; j += 1) {
+ suite.addTest(new ForeignKeyTest(TestEnv.ALL[i],
+ ACTIONS[j],
+ ACTION_LABELS[j]));
+ }
+ }
+ return suite;
+ }
+
+ private TestEnv testEnv;
+ private Environment env;
+ private StoredClassCatalog catalog;
+ private TupleSerialFactory factory;
+ private Database store1;
+ private Database store2;
+ private SecondaryDatabase index1;
+ private SecondaryDatabase index2;
+ private Map storeMap1;
+ private Map storeMap2;
+ private Map indexMap1;
+ private Map indexMap2;
+ private final ForeignKeyDeleteAction onDelete;
+
+ public ForeignKeyTest(TestEnv testEnv, ForeignKeyDeleteAction onDelete,
+ String onDeleteLabel) {
+
+ super("ForeignKeyTest-" + testEnv.getName() + '-' + onDeleteLabel);
+
+ this.testEnv = testEnv;
+ this.onDelete = onDelete;
+ }
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ SharedTestUtils.printTestName(getName());
+ env = testEnv.open(getName());
+
+ createDatabase();
+ }
+
+ @Override
+ public void tearDown() {
+
+ try {
+ if (index1 != null) {
+ index1.close();
+ }
+ if (index2 != null) {
+ index2.close();
+ }
+ if (store1 != null) {
+ store1.close();
+ }
+ if (store2 != null) {
+ store2.close();
+ }
+ if (catalog != null) {
+ catalog.close();
+ }
+ if (env != null) {
+ env.close();
+ }
+ } catch (Exception e) {
+ System.out.println("Ignored exception during tearDown: " + e);
+ } finally {
+ /* Ensure that GC can cleanup. */
+ env = null;
+ testEnv = null;
+ catalog = null;
+ store1 = null;
+ store2 = null;
+ index1 = null;
+ index2 = null;
+ factory = null;
+ storeMap1 = null;
+ storeMap2 = null;
+ indexMap1 = null;
+ indexMap2 = null;
+ }
+ }
+
+ @Override
+ public void runTest()
+ throws Exception {
+
+ try {
+ createViews();
+ writeAndRead();
+ } catch (Exception e) {
+ throw ExceptionUnwrapper.unwrap(e);
+ }
+ }
+
+ private void createDatabase()
+ throws Exception {
+
+ catalog = new StoredClassCatalog(openDb("catalog.db"));
+ factory = new TupleSerialFactory(catalog);
+ assertSame(catalog, factory.getCatalog());
+
+ store1 = openDb("store1.db");
+ store2 = openDb("store2.db");
+ index1 = openSecondaryDb(factory, "1", store1, "index1.db", null);
+ index2 = openSecondaryDb(factory, "2", store2, "index2.db", store1);
+ }
+
+ private Database openDb(String file)
+ throws Exception {
+
+ DatabaseConfig config = new DatabaseConfig();
+ DbCompat.setTypeBtree(config);
+ config.setTransactional(testEnv.isTxnMode());
+ config.setAllowCreate(true);
+
+ return DbCompat.testOpenDatabase(env, null, file, null, config);
+ }
+
+ private SecondaryDatabase openSecondaryDb(TupleSerialFactory factory,
+ String keyName,
+ Database primary,
+ String file,
+ Database foreignStore)
+ throws Exception {
+
+ TupleSerialMarshalledKeyCreator keyCreator =
+ factory.getKeyCreator(MarshalledObject.class, keyName);
+
+ SecondaryConfig secConfig = new SecondaryConfig();
+ DbCompat.setTypeBtree(secConfig);
+ secConfig.setTransactional(testEnv.isTxnMode());
+ secConfig.setAllowCreate(true);
+ secConfig.setKeyCreator(keyCreator);
+ if (foreignStore != null) {
+ secConfig.setForeignKeyDatabase(foreignStore);
+ secConfig.setForeignKeyDeleteAction(onDelete);
+ if (onDelete == ForeignKeyDeleteAction.NULLIFY) {
+ secConfig.setForeignKeyNullifier(keyCreator);
+ }
+ }
+
+ return DbCompat.testOpenSecondaryDatabase
+ (env, null, file, null, primary, secConfig);
+ }
+
+ private void createViews() {
+ storeMap1 = factory.newMap(store1, String.class,
+ MarshalledObject.class, true);
+ storeMap2 = factory.newMap(store2, String.class,
+ MarshalledObject.class, true);
+ indexMap1 = factory.newMap(index1, String.class,
+ MarshalledObject.class, true);
+ indexMap2 = factory.newMap(index2, String.class,
+ MarshalledObject.class, true);
+ }
+
+ private void writeAndRead()
+ throws Exception {
+
+ CurrentTransaction txn = CurrentTransaction.getInstance(env);
+ if (txn != null) {
+ txn.beginTransaction(null);
+ }
+
+ MarshalledObject o1 = new MarshalledObject("data1", "pk1", "ik1", "");
+ assertNull(storeMap1.put(null, o1));
+
+ assertEquals(o1, storeMap1.get("pk1"));
+ assertEquals(o1, indexMap1.get("ik1"));
+
+ MarshalledObject o2 = new MarshalledObject("data2", "pk2", "", "pk1");
+ assertNull(storeMap2.put(null, o2));
+
+ assertEquals(o2, storeMap2.get("pk2"));
+ assertEquals(o2, indexMap2.get("pk1"));
+
+ if (txn != null) {
+ txn.commitTransaction();
+ txn.beginTransaction(null);
+ }
+
+ /*
+ * store1 contains o1 with primary key "pk1" and index key "ik1".
+ *
+ * store2 contains o2 with primary key "pk2" and foreign key "pk1",
+ * which is the primary key of store1.
+ */
+
+ if (onDelete == ForeignKeyDeleteAction.ABORT) {
+
+ /* Test that we abort trying to delete a referenced key. */
+
+ try {
+ storeMap1.remove("pk1");
+ fail();
+ } catch (RuntimeExceptionWrapper expected) {
+ assertTrue(expected.getCause() instanceof DatabaseException);
+ assertTrue(!DbCompat.NEW_JE_EXCEPTIONS);
+ }
+ if (txn != null) {
+ txn.abortTransaction();
+ txn.beginTransaction(null);
+ }
+
+ /* Test that we can put a record into store2 with a null foreign
+ * key value. */
+
+ o2 = new MarshalledObject("data2", "pk2", "", "");
+ assertNotNull(storeMap2.put(null, o2));
+ assertEquals(o2, storeMap2.get("pk2"));
+
+ /* The index2 record should have been deleted since the key was set
+ * to null above. */
+
+ assertNull(indexMap2.get("pk1"));
+
+ /* Test that now we can delete the record in store1, since it is no
+ * longer referenced. */
+
+ assertNotNull(storeMap1.remove("pk1"));
+ assertNull(storeMap1.get("pk1"));
+ assertNull(indexMap1.get("ik1"));
+
+ } else if (onDelete == ForeignKeyDeleteAction.NULLIFY) {
+
+ /* Delete the referenced key. */
+
+ assertNotNull(storeMap1.remove("pk1"));
+ assertNull(storeMap1.get("pk1"));
+ assertNull(indexMap1.get("ik1"));
+
+ /* The store2 record should still exist, but should have an empty
+ * secondary key since it was nullified. */
+
+ o2 = (MarshalledObject) storeMap2.get("pk2");
+ assertNotNull(o2);
+ assertEquals("data2", o2.getData());
+ assertEquals("pk2", o2.getPrimaryKey());
+ assertEquals("", o2.getIndexKey1());
+ assertEquals("", o2.getIndexKey2());
+
+ } else if (onDelete == ForeignKeyDeleteAction.CASCADE) {
+
+ /* Delete the referenced key. */
+
+ assertNotNull(storeMap1.remove("pk1"));
+ assertNull(storeMap1.get("pk1"));
+ assertNull(indexMap1.get("ik1"));
+
+ /* The store2 record should have been deleted also. */
+
+ assertNull(storeMap2.get("pk2"));
+ assertNull(indexMap2.get("pk1"));
+
+ } else {
+ throw new IllegalStateException();
+ }
+
+ /*
+ * Test that a foreign key value that is not present in the foreign
+ * store may not be used. "pk2" is not in store1 in this case.
+ */
+ assertNull(storeMap1.get("pk2"));
+ MarshalledObject o3 = new MarshalledObject("data3", "pk3", "", "pk2");
+ try {
+ storeMap2.put(null, o3);
+ fail();
+ } catch (RuntimeExceptionWrapper expected) {
+ assertTrue(expected.getCause() instanceof DatabaseException);
+ assertTrue(!DbCompat.NEW_JE_EXCEPTIONS);
+ }
+
+ if (txn != null) {
+ txn.abortTransaction();
+ }
+ }
+}
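The part of openSecondaryDb() that the three delete actions hinge on is the foreign key wiring of the SecondaryConfig. A condensed sketch of just that wiring, assuming the foreign store and key creator were created as above; NULLIFY is chosen here purely for illustration:

import com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator;
import com.sleepycat.db.Database;
import com.sleepycat.db.ForeignKeyDeleteAction;
import com.sleepycat.db.SecondaryConfig;

class ForeignKeyConfigSketch {

    // Illustrative sketch, not part of the original test sources.
    // Builds a SecondaryConfig whose records must reference keys in foreignStore.
    static SecondaryConfig nullifyingConfig(Database foreignStore,
                                            TupleSerialMarshalledKeyCreator keyCreator) {
        SecondaryConfig secConfig = new SecondaryConfig();
        secConfig.setAllowCreate(true);
        secConfig.setKeyCreator(keyCreator);
        secConfig.setForeignKeyDatabase(foreignStore);
        // On deletion of a referenced key, clear the reference instead of failing.
        secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.NULLIFY);
        // NULLIFY also requires a nullifier; the marshalled key creator doubles as one.
        secConfig.setForeignKeyNullifier(keyCreator);
        return secConfig;
    }
}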
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/IterDeadlockTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/IterDeadlockTest.java
new file mode 100644
index 0000000..59509e9
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/IterDeadlockTest.java
@@ -0,0 +1,228 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import java.util.Iterator;
+import java.util.ListIterator;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.ByteArrayBinding;
+import com.sleepycat.collections.StoredIterator;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.DeadlockException;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * Tests the fix for [#10516], where the StoredIterator constructor was not
+ * closing the cursor when an exception occurred. For example, a deadlock
+ * exception might occur if the constructor was unable to move the cursor to
+ * the first element.
+ * @author Mark Hayes
+ */
+public class IterDeadlockTest extends TestCase {
+
+ private static final byte[] ONE = { 1 };
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite(IterDeadlockTest.class);
+ return suite;
+ }
+
+ private Environment env;
+ private Database store1;
+ private Database store2;
+ private StoredSortedMap map1;
+ private StoredSortedMap map2;
+ private final ByteArrayBinding binding = new ByteArrayBinding();
+
+ public IterDeadlockTest(String name) {
+
+ super(name);
+ }
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ env = TestEnv.TXN.open("IterDeadlockTest");
+ store1 = openDb("store1.db");
+ store2 = openDb("store2.db");
+ map1 = new StoredSortedMap(store1, binding, binding, true);
+ map2 = new StoredSortedMap(store2, binding, binding, true);
+ }
+
+ @Override
+ public void tearDown() {
+
+ if (store1 != null) {
+ try {
+ store1.close();
+ } catch (Exception e) {
+ System.out.println("Ignored exception during tearDown: " + e);
+ }
+ }
+ if (store2 != null) {
+ try {
+ store2.close();
+ } catch (Exception e) {
+ System.out.println("Ignored exception during tearDown: " + e);
+ }
+ }
+ if (env != null) {
+ try {
+ env.close();
+ } catch (Exception e) {
+ System.out.println("Ignored exception during tearDown: " + e);
+ }
+ }
+ /* Allow GC of DB objects in the test case. */
+ env = null;
+ store1 = null;
+ store2 = null;
+ map1 = null;
+ map2 = null;
+ }
+
+ private Database openDb(String file)
+ throws Exception {
+
+ DatabaseConfig config = new DatabaseConfig();
+ DbCompat.setTypeBtree(config);
+ config.setTransactional(true);
+ config.setAllowCreate(true);
+
+ return DbCompat.testOpenDatabase(env, null, file, null, config);
+ }
+
+ public void testIterDeadlock()
+ throws Exception {
+
+ final Object parent = new Object();
+ final Object child1 = new Object();
+ final Object child2 = new Object();
+ final TransactionRunner runner = new TransactionRunner(env);
+ runner.setMaxRetries(0);
+
+ /* Write a record in each db. */
+ runner.run(new TransactionWorker() {
+ public void doWork() {
+ assertNull(map1.put(ONE, ONE));
+ assertNull(map2.put(ONE, ONE));
+ }
+ });
+
+ /*
+ * A thread to open iterator 1, then wait to be notified, then open
+ * iterator 2.
+ */
+ final Thread thread1 = new Thread(new Runnable() {
+ public void run() {
+ try {
+ runner.run(new TransactionWorker() {
+ public void doWork() throws Exception {
+ synchronized (child1) {
+ ListIterator i1 =
+ (ListIterator) map1.values().iterator();
+ i1.next();
+ i1.set(ONE); /* Write lock. */
+ StoredIterator.close(i1);
+ synchronized (parent) { parent.notify(); }
+ child1.wait();
+ Iterator i2 = map2.values().iterator();
+ assertTrue(i2.hasNext());
+ StoredIterator.close(i2);
+ }
+ }
+ });
+ } catch (DeadlockException expected) {
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail(e.toString());
+ }
+ }
+ });
+
+ /*
+ * A thread to open iterator 2, then wait to be notified, then open
+ * iterator 1.
+ */
+ final Thread thread2 = new Thread(new Runnable() {
+ public void run() {
+ try {
+ runner.run(new TransactionWorker() {
+ public void doWork() throws Exception {
+ synchronized (child2) {
+ ListIterator i2 =
+ (ListIterator) map2.values().iterator();
+ i2.next();
+ i2.set(ONE); /* Write lock. */
+ StoredIterator.close(i2);
+ synchronized (parent) { parent.notify(); }
+ child2.wait();
+ Iterator i1 = map1.values().iterator();
+ assertTrue(i1.hasNext());
+ StoredIterator.close(i1);
+ }
+ }
+ });
+ } catch (DeadlockException expected) {
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail(e.toString());
+ }
+ }
+ });
+
+ /*
+ * Open iterator 1 in thread 1, then iterator 2 in thread 2, then let
+ * the threads run to open the other iterators and cause a deadlock.
+ */
+ synchronized (parent) {
+ thread1.start();
+ parent.wait();
+ thread2.start();
+ parent.wait();
+ synchronized (child1) { child1.notify(); }
+ synchronized (child2) { child2.notify(); }
+ thread1.join();
+ thread2.join();
+ }
+
+ /*
+ * Before the fix for [#10516] we would get an exception indicating
+ * that cursors were not closed, when closing the stores below.
+ */
+ store1.close();
+ store1 = null;
+ store2.close();
+ store2 = null;
+ env.close();
+ env = null;
+ }
+}
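Each worker thread above wraps its cursor work in a TransactionRunner with retries disabled, so the expected deadlock surfaces as a DeadlockException instead of being silently retried. A minimal sketch of that wrapper, assuming env is an open transactional Environment and work is some TransactionWorker; the class and method names are illustrative:

import com.sleepycat.collections.TransactionRunner;
import com.sleepycat.collections.TransactionWorker;
import com.sleepycat.db.DeadlockException;
import com.sleepycat.db.Environment;

class NoRetrySketch {

    // Illustrative sketch, not part of the original test sources.
    // Runs work once; a deadlock aborts the transaction and is reported to the caller.
    static boolean ranWithoutDeadlock(Environment env, TransactionWorker work)
        throws Exception {

        TransactionRunner runner = new TransactionRunner(env);
        runner.setMaxRetries(0); // surface DeadlockException instead of retrying
        try {
            runner.run(work);
            return true;
        } catch (DeadlockException expected) {
            return false; // this transaction was chosen as the deadlock victim
        }
    }
}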
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/JoinTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/JoinTest.java
new file mode 100644
index 0000000..495f785
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/JoinTest.java
@@ -0,0 +1,225 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import java.util.Map;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.serial.test.MarshalledObject;
+import com.sleepycat.collections.StoredCollection;
+import com.sleepycat.collections.StoredContainer;
+import com.sleepycat.collections.StoredIterator;
+import com.sleepycat.collections.StoredMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.collections.TupleSerialFactory;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.SecondaryConfig;
+import com.sleepycat.db.SecondaryDatabase;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class JoinTest extends TestCase
+ implements TransactionWorker {
+
+ private static final String MATCH_DATA = "d4"; // matches both keys = "yes"
+ private static final String MATCH_KEY = "k4"; // matches both keys = "yes"
+ private static final String[] VALUES = {"yes", "yes"};
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ return new JoinTest();
+ }
+
+ private Environment env;
+ private TransactionRunner runner;
+ private StoredClassCatalog catalog;
+ private TupleSerialFactory factory;
+ private Database store;
+ private SecondaryDatabase index1;
+ private SecondaryDatabase index2;
+ private StoredMap storeMap;
+ private StoredMap indexMap1;
+ private StoredMap indexMap2;
+
+ public JoinTest() {
+
+ super("JoinTest");
+ }
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ SharedTestUtils.printTestName(getName());
+ env = TestEnv.TXN.open(getName());
+ runner = new TransactionRunner(env);
+ createDatabase();
+ }
+
+ @Override
+ public void tearDown() {
+
+ try {
+ if (index1 != null) {
+ index1.close();
+ }
+ if (index2 != null) {
+ index2.close();
+ }
+ if (store != null) {
+ store.close();
+ }
+ if (catalog != null) {
+ catalog.close();
+ }
+ if (env != null) {
+ env.close();
+ }
+ } catch (Exception e) {
+ System.out.println("Ignored exception during tearDown: " + e);
+ } finally {
+ /* Ensure that GC can cleanup. */
+ index1 = null;
+ index2 = null;
+ store = null;
+ catalog = null;
+ env = null;
+ runner = null;
+ factory = null;
+ storeMap = null;
+ indexMap1 = null;
+ indexMap2 = null;
+ }
+ }
+
+ @Override
+ public void runTest()
+ throws Exception {
+
+ runner.run(this);
+ }
+
+ public void doWork() {
+ createViews();
+ writeAndRead();
+ }
+
+ private void createDatabase()
+ throws Exception {
+
+ catalog = new StoredClassCatalog(openDb("catalog.db"));
+ factory = new TupleSerialFactory(catalog);
+ assertSame(catalog, factory.getCatalog());
+
+ store = openDb("store.db");
+ index1 = openSecondaryDb(store, "index1.db", "1");
+ index2 = openSecondaryDb(store, "index2.db", "2");
+ }
+
+ private Database openDb(String file)
+ throws Exception {
+
+ DatabaseConfig config = new DatabaseConfig();
+ DbCompat.setTypeBtree(config);
+ config.setTransactional(true);
+ config.setAllowCreate(true);
+
+ return DbCompat.testOpenDatabase(env, null, file, null, config);
+ }
+
+ private SecondaryDatabase openSecondaryDb(Database primary,
+ String file,
+ String keyName)
+ throws Exception {
+
+ SecondaryConfig secConfig = new SecondaryConfig();
+ DbCompat.setTypeBtree(secConfig);
+ secConfig.setTransactional(true);
+ secConfig.setAllowCreate(true);
+ DbCompat.setSortedDuplicates(secConfig, true);
+ secConfig.setKeyCreator(factory.getKeyCreator(MarshalledObject.class,
+ keyName));
+
+ return DbCompat.testOpenSecondaryDatabase
+ (env, null, file, null, primary, secConfig);
+ }
+
+ private void createViews() {
+ storeMap = factory.newMap(store, String.class,
+ MarshalledObject.class, true);
+ indexMap1 = factory.newMap(index1, String.class,
+ MarshalledObject.class, true);
+ indexMap2 = factory.newMap(index2, String.class,
+ MarshalledObject.class, true);
+ }
+
+ private void writeAndRead() {
+ // write records: Data, PrimaryKey, IndexKey1, IndexKey2
+ assertNull(storeMap.put(null,
+ new MarshalledObject("d1", "k1", "no", "yes")));
+ assertNull(storeMap.put(null,
+ new MarshalledObject("d2", "k2", "no", "no")));
+ assertNull(storeMap.put(null,
+ new MarshalledObject("d3", "k3", "no", "yes")));
+ assertNull(storeMap.put(null,
+ new MarshalledObject("d4", "k4", "yes", "yes")));
+ assertNull(storeMap.put(null,
+ new MarshalledObject("d5", "k5", "yes", "no")));
+
+ Object o;
+ Map.Entry e;
+
+ // join values with index maps
+ o = doJoin((StoredCollection) storeMap.values());
+ assertEquals(MATCH_DATA, ((MarshalledObject) o).getData());
+
+ // join keySet with index maps
+ o = doJoin((StoredCollection) storeMap.keySet());
+ assertEquals(MATCH_KEY, o);
+
+ // join entrySet with index maps
+ o = doJoin((StoredCollection) storeMap.entrySet());
+ e = (Map.Entry) o;
+ assertEquals(MATCH_KEY, e.getKey());
+ assertEquals(MATCH_DATA, ((MarshalledObject) e.getValue()).getData());
+ }
+
+ private Object doJoin(StoredCollection coll) {
+
+ StoredContainer[] indices = { indexMap1, indexMap2 };
+ StoredIterator i = coll.join(indices, VALUES, null);
+ try {
+ assertTrue(i.hasNext());
+ Object result = i.next();
+ assertNotNull(result);
+ assertFalse(i.hasNext());
+ return result;
+ } finally { i.close(); }
+ }
+}
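The call that doJoin() centres on is StoredCollection.join(), an equality join of one primary collection against several secondary index maps. A stand-alone sketch of that call, with the map and key parameters standing in for the objects built by createViews(); names are illustrative:

import com.sleepycat.collections.StoredCollection;
import com.sleepycat.collections.StoredContainer;
import com.sleepycat.collections.StoredIterator;
import com.sleepycat.collections.StoredMap;

class JoinSketch {

    // Illustrative sketch, not part of the original test sources.
    // Returns the first primary value whose index1/index2 keys equal key1/key2.
    static Object joinOne(StoredMap storeMap, StoredMap indexMap1,
                          StoredMap indexMap2, Object key1, Object key2) {
        StoredContainer[] indices = { indexMap1, indexMap2 };
        Object[] indexKeys = { key1, key2 };
        StoredIterator i =
            ((StoredCollection) storeMap.values()).join(indices, indexKeys, null);
        try {
            return i.hasNext() ? i.next() : null;
        } finally {
            i.close(); // always release the underlying join cursor
        }
    }
}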
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/NullTransactionRunner.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/NullTransactionRunner.java
new file mode 100644
index 0000000..2ccfd66
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/NullTransactionRunner.java
@@ -0,0 +1,32 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.db.Environment;
+import com.sleepycat.util.ExceptionUnwrapper;
+
+class NullTransactionRunner extends TransactionRunner {
+
+ NullTransactionRunner(Environment env) {
+
+ super(env);
+ }
+
+ public void run(TransactionWorker worker)
+ throws Exception {
+
+ try {
+ worker.doWork();
+ } catch (Exception e) {
+ throw ExceptionUnwrapper.unwrap(e);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/SecondaryDeadlockTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/SecondaryDeadlockTest.java
new file mode 100644
index 0000000..b52e2cb
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/SecondaryDeadlockTest.java
@@ -0,0 +1,206 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.DeadlockException;
+import com.sleepycat.db.TransactionConfig;
+import com.sleepycat.util.ExceptionUnwrapper;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * Tests whether secondary access can cause a self-deadlock when reading via a
+ * secondary because the collections API secondary implementation in DB 4.2
+ * opens two cursors. Part of the problem in [#10516] was that the
+ * secondary get() was not done in a txn. This problem should not occur in DB
+ * 4.3 and JE -- an ordinary deadlock occurs instead and is detected.
+ *
+ * @author Mark Hayes
+ */
+public class SecondaryDeadlockTest extends TestCase {
+
+ private static final Long N_ONE = new Long(1);
+ private static final Long N_101 = new Long(101);
+ private static final int N_ITERS = 20;
+ private static final int MAX_RETRIES = 1000;
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite(SecondaryDeadlockTest.class);
+ return suite;
+ }
+
+ private Environment env;
+ private Database store;
+ private Database index;
+ private StoredSortedMap storeMap;
+ private StoredSortedMap indexMap;
+ private Exception exception;
+
+ public SecondaryDeadlockTest(String name) {
+
+ super(name);
+ }
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ env = TestEnv.TXN.open("SecondaryDeadlockTest");
+ store = TestStore.BTREE_UNIQ.open(env, "store.db");
+ index = TestStore.BTREE_UNIQ.openIndex(store, "index.db");
+ storeMap = new StoredSortedMap(store,
+ TestStore.BTREE_UNIQ.getKeyBinding(),
+ TestStore.BTREE_UNIQ.getValueBinding(),
+ true);
+ indexMap = new StoredSortedMap(index,
+ TestStore.BTREE_UNIQ.getKeyBinding(),
+ TestStore.BTREE_UNIQ.getValueBinding(),
+ true);
+ }
+
+ @Override
+ public void tearDown() {
+
+ if (index != null) {
+ try {
+ index.close();
+ } catch (Exception e) {
+ System.out.println("Ignored exception during tearDown: " + e);
+ }
+ }
+ if (store != null) {
+ try {
+ store.close();
+ } catch (Exception e) {
+ System.out.println("Ignored exception during tearDown: " + e);
+ }
+ }
+ if (env != null) {
+ try {
+ env.close();
+ } catch (Exception e) {
+ System.out.println("Ignored exception during tearDown: " + e);
+ }
+ }
+ /* Allow GC of DB objects in the test case. */
+ env = null;
+ store = null;
+ index = null;
+ storeMap = null;
+ indexMap = null;
+ }
+
+ public void testSecondaryDeadlock()
+ throws Exception {
+
+ final TransactionRunner runner = new TransactionRunner(env);
+ runner.setMaxRetries(MAX_RETRIES);
+
+ /*
+ * This test deadlocks a lot at degree 3 serialization. In debugging
+ * this I discovered it was not due to phantom prevention per se but
+ * just to a change in timing.
+ */
+ TransactionConfig txnConfig = new TransactionConfig();
+ runner.setTransactionConfig(txnConfig);
+
+ /*
+ * A thread to do put() and delete() via the primary, which will lock
+ * the primary first then the secondary. Uses transactions.
+ */
+ final Thread thread1 = new Thread(new Runnable() {
+ public void run() {
+ try {
+ /* The TransactionRunner performs retries. */
+ for (int i = 0; i < N_ITERS; i += 1) {
+ runner.run(new TransactionWorker() {
+ public void doWork() {
+ assertEquals(null, storeMap.put(N_ONE, N_101));
+ }
+ });
+ runner.run(new TransactionWorker() {
+ public void doWork() {
+ assertEquals(N_101, storeMap.remove(N_ONE));
+ }
+ });
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ exception = e;
+ }
+ }
+ }, "ThreadOne");
+
+ /*
+ * A thread to get() via the secondary, which will lock the secondary
+ * first then the primary. Does not use a transaction.
+ */
+ final Thread thread2 = new Thread(new Runnable() {
+ public void run() {
+ try {
+ for (int i = 0; i < N_ITERS; i += 1) {
+ for (int j = 0; j < MAX_RETRIES; j += 1) {
+ try {
+ Object value = indexMap.get(N_ONE);
+ assertTrue(value == null ||
+ N_101.equals(value));
+ break;
+ } catch (Exception e) {
+ e = ExceptionUnwrapper.unwrap(e);
+ if (e instanceof DeadlockException) {
+ continue; /* Retry on deadlock. */
+ } else {
+ throw e;
+ }
+ }
+ }
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ exception = e;
+ }
+ }
+ }, "ThreadTwo");
+
+ thread1.start();
+ thread2.start();
+ thread1.join();
+ thread2.join();
+
+ index.close();
+ index = null;
+ store.close();
+ store = null;
+ env.close();
+ env = null;
+
+ if (exception != null) {
+ fail(exception.toString());
+ }
+ }
+}
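Thread 2 above shows the manual retry loop needed when reading outside a TransactionRunner: exceptions from the collections API may arrive wrapped, so they are unwrapped before testing for a deadlock. A condensed sketch of that loop, with map, key and maxRetries as illustrative placeholders:

import java.util.Map;

import com.sleepycat.db.DeadlockException;
import com.sleepycat.util.ExceptionUnwrapper;

class RetryingReadSketch {

    // Illustrative sketch, not part of the original test sources.
    // Reads map.get(key), retrying up to maxRetries times when a deadlock occurs.
    static Object readWithRetries(Map map, Object key, int maxRetries)
        throws Exception {

        for (int attempt = 0; ; attempt += 1) {
            try {
                return map.get(key);
            } catch (Exception e) {
                Exception unwrapped = ExceptionUnwrapper.unwrap(e);
                if (unwrapped instanceof DeadlockException && attempt < maxRetries) {
                    continue; // deadlock victim: try the read again
                }
                throw unwrapped;
            }
        }
    }
}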
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestDataBinding.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestDataBinding.java
new file mode 100644
index 0000000..96cd41d
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestDataBinding.java
@@ -0,0 +1,33 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.db.DatabaseEntry;
+
+/**
+ * @author Mark Hayes
+ */
+class TestDataBinding implements EntryBinding {
+
+ public Object entryToObject(DatabaseEntry data) {
+
+ if (data.getSize() != 1) {
+ throw new IllegalStateException("size=" + data.getSize());
+ }
+ byte val = data.getData()[data.getOffset()];
+ return new Long(val);
+ }
+
+ public void objectToEntry(Object object, DatabaseEntry data) {
+
+ byte val = ((Number) object).byteValue();
+ data.setData(new byte[] { val }, 0, 1);
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestEntity.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestEntity.java
new file mode 100644
index 0000000..3c895d7
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestEntity.java
@@ -0,0 +1,44 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+/**
+ * @author Mark Hayes
+ */
+class TestEntity {
+
+ int key;
+ int value;
+
+ TestEntity(int key, int value) {
+
+ this.key = key;
+ this.value = value;
+ }
+
+ public boolean equals(Object o) {
+
+ try {
+ TestEntity e = (TestEntity) o;
+ return e.key == key && e.value == value;
+ } catch (ClassCastException e) {
+ return false;
+ }
+ }
+
+ public int hashCode() {
+
+ return key;
+ }
+
+ public String toString() {
+
+ return "[key " + key + " value " + value + ']';
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestEntityBinding.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestEntityBinding.java
new file mode 100644
index 0000000..a6d7632
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestEntityBinding.java
@@ -0,0 +1,63 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.RecordNumberBinding;
+import com.sleepycat.db.DatabaseEntry;
+
+/**
+ * @author Mark Hayes
+ */
+class TestEntityBinding implements EntityBinding {
+
+ private boolean isRecNum;
+
+ TestEntityBinding(boolean isRecNum) {
+
+ this.isRecNum = isRecNum;
+ }
+
+ public Object entryToObject(DatabaseEntry key, DatabaseEntry value) {
+
+ byte keyByte;
+ if (isRecNum) {
+ if (key.getSize() != 4) {
+ throw new IllegalStateException();
+ }
+ keyByte = (byte) RecordNumberBinding.entryToRecordNumber(key);
+ } else {
+ if (key.getSize() != 1) {
+ throw new IllegalStateException();
+ }
+ keyByte = key.getData()[key.getOffset()];
+ }
+ if (value.getSize() != 1) {
+ throw new IllegalStateException();
+ }
+ byte valByte = value.getData()[value.getOffset()];
+ return new TestEntity(keyByte, valByte);
+ }
+
+ public void objectToKey(Object object, DatabaseEntry key) {
+
+ byte val = (byte) ((TestEntity) object).key;
+ if (isRecNum) {
+ RecordNumberBinding.recordNumberToEntry(val, key);
+ } else {
+ key.setData(new byte[] { val }, 0, 1);
+ }
+ }
+
+ public void objectToData(Object object, DatabaseEntry value) {
+
+ byte val = (byte) ((TestEntity) object).value;
+ value.setData(new byte[] { val }, 0, 1);
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestEnv.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestEnv.java
new file mode 100644
index 0000000..aed6b5a
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestEnv.java
@@ -0,0 +1,130 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * @author Mark Hayes
+ */
+public class TestEnv {
+
+ public static final TestEnv BDB;
+ public static final TestEnv CDB;
+ public static final TestEnv TXN;
+ static {
+ EnvironmentConfig config;
+
+ config = newEnvConfig();
+ BDB = new TestEnv("bdb", config);
+
+ if (DbCompat.CDB) {
+ config = newEnvConfig();
+ DbCompat.setInitializeCDB(config, true);
+ CDB = new TestEnv("cdb", config);
+ } else {
+ CDB = null;
+ }
+
+ config = newEnvConfig();
+ config.setTransactional(true);
+ DbCompat.setInitializeLocking(config, true);
+ TXN = new TestEnv("txn", config);
+ }
+
+ private static EnvironmentConfig newEnvConfig() {
+
+ EnvironmentConfig config = new EnvironmentConfig();
+ if (DbCompat.MEMORY_SUBSYSTEM) {
+ DbCompat.setInitializeCache(config, true);
+ }
+ return config;
+ }
+
+ public static final TestEnv[] ALL;
+ static {
+ if (DbCompat.CDB) {
+ ALL = new TestEnv[] { BDB, CDB, TXN };
+ } else {
+ ALL = new TestEnv[] { BDB, TXN };
+ }
+ }
+
+ private String name;
+ private EnvironmentConfig config;
+
+ TestEnv(String name, EnvironmentConfig config) {
+
+ this.name = name;
+ this.config = config;
+ }
+
+ public String getName() {
+
+ return name;
+ }
+
+ public boolean isTxnMode() {
+
+ return config.getTransactional();
+ }
+
+ public boolean isCdbMode() {
+
+ return DbCompat.getInitializeCDB(config);
+ }
+
+ public Environment open(String testName)
+ throws IOException, DatabaseException {
+
+ return open(testName, true);
+ }
+
+ public Environment open(String testName, boolean create)
+ throws IOException, DatabaseException {
+
+ config.setAllowCreate(create);
+ /* OLDEST deadlock detection on DB matches the use of timeouts on JE. */
+ DbCompat.setLockDetectModeOldest(config);
+ File dir = getDirectory(testName, create);
+ return newEnvironment(dir, config);
+ }
+
+ /**
+ * Is overridden in XACollectionTest.
+ */
+ protected Environment newEnvironment(File dir, EnvironmentConfig config)
+ throws DatabaseException, IOException {
+
+ return new Environment(dir, config);
+ }
+
+ public File getDirectory(String testName)
+ throws IOException {
+
+ return getDirectory(testName, true);
+ }
+
+ public File getDirectory(String testName, boolean create)
+ throws IOException {
+
+ if (create) {
+ return SharedTestUtils.getNewDir("db-test/" + testName);
+ } else {
+ return SharedTestUtils.getExistingDir("db-test/" + testName);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestKeyAssigner.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestKeyAssigner.java
new file mode 100644
index 0000000..0e5ea4f
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestKeyAssigner.java
@@ -0,0 +1,41 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import com.sleepycat.bind.RecordNumberBinding;
+import com.sleepycat.collections.PrimaryKeyAssigner;
+import com.sleepycat.db.DatabaseEntry;
+
+/**
+ * @author Mark Hayes
+ */
+class TestKeyAssigner implements PrimaryKeyAssigner {
+
+ private byte next = 1;
+ private final boolean isRecNum;
+
+ TestKeyAssigner(boolean isRecNum) {
+
+ this.isRecNum = isRecNum;
+ }
+
+ public void assignKey(DatabaseEntry keyData) {
+ if (isRecNum) {
+ RecordNumberBinding.recordNumberToEntry(next, keyData);
+ } else {
+ keyData.setData(new byte[] { next }, 0, 1);
+ }
+ next += 1;
+ }
+
+ void reset() {
+
+ next = 1;
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestKeyCreator.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestKeyCreator.java
new file mode 100644
index 0000000..bcee3ef
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestKeyCreator.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import com.sleepycat.bind.RecordNumberBinding;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.SecondaryDatabase;
+import com.sleepycat.db.SecondaryKeyCreator;
+
+/**
+ * Unused until secondaries are available.
+ * @author Mark Hayes
+ */
+class TestKeyCreator implements SecondaryKeyCreator {
+
+ private final boolean isRecNum;
+
+ TestKeyCreator(boolean isRecNum) {
+
+ this.isRecNum = isRecNum;
+ }
+
+ public boolean createSecondaryKey(SecondaryDatabase db,
+ DatabaseEntry primaryKeyData,
+ DatabaseEntry valueData,
+ DatabaseEntry indexKeyData) {
+ if (valueData.getSize() == 0) {
+ return false;
+ }
+ if (valueData.getSize() != 1) {
+ throw new IllegalStateException();
+ }
+ byte val = valueData.getData()[valueData.getOffset()];
+ if (val == 0) {
+ return false; // fixed-len pad value
+ }
+ val -= 100;
+ if (isRecNum) {
+ RecordNumberBinding.recordNumberToEntry(val, indexKeyData);
+ } else {
+ indexKeyData.setData(new byte[] { val }, 0, 1);
+ }
+ return true;
+ }
+
+ public void clearIndexKey(DatabaseEntry valueData) {
+
+ throw new RuntimeException("not supported");
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestSR15721.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestSR15721.java
new file mode 100644
index 0000000..749e375
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestSR15721.java
@@ -0,0 +1,119 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.collections.CurrentTransaction;
+import com.sleepycat.db.Environment;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Chao Huang
+ */
+public class TestSR15721 extends TestCase {
+
+ /**
+ * Runs a command line collection test.
+ * @see #usage
+ */
+ public static void main(String[] args) {
+ if (args.length == 1 &&
+ (args[0].equals("-h") || args[0].equals("-help"))) {
+ usage();
+ } else {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+ }
+
+ private static void usage() {
+
+ System.out.println(
+ "Usage: java com.sleepycat.collections.test.TestSR15721"
+ + " [-h | -help]\n");
+ System.exit(2);
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite(TestSR15721.class);
+ return suite;
+ }
+
+ private Environment env;
+ private CurrentTransaction currentTxn;
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ env = TestEnv.TXN.open("TestSR15721");
+ currentTxn = CurrentTransaction.getInstance(env);
+ }
+
+ @Override
+ public void tearDown() {
+ try {
+ if (env != null) {
+ env.close();
+ }
+ } catch (Exception e) {
+ System.out.println("Ignored exception during tearDown: " + e);
+ } finally {
+ /* Ensure that GC can cleanup. */
+ env = null;
+ currentTxn = null;
+ }
+ }
+
+ /**
+ * Tests that the CurrentTransaction instance is indeed not reclaimed by
+ * GC while the attached environment is open. [#15721]
+ */
+ public void testSR15721Fix()
+ throws Exception {
+
+ int hash = currentTxn.hashCode();
+ int hash2 = -1;
+
+ currentTxn = CurrentTransaction.getInstance(env);
+ hash2 = currentTxn.hashCode();
+ assertTrue(hash == hash2);
+
+ currentTxn.beginTransaction(null);
+ currentTxn = null;
+ hash2 = -1;
+
+ for (int i = 0; i < 10; i += 1) {
+ byte[] x = null;
+ try {
+ x = new byte[Integer.MAX_VALUE - 1];
+ fail();
+ } catch (OutOfMemoryError expected) {
+ }
+ assertNull(x);
+
+ System.gc();
+ }
+
+ currentTxn = CurrentTransaction.getInstance(env);
+ hash2 = currentTxn.hashCode();
+ currentTxn.commitTransaction();
+
+ assertTrue(hash == hash2);
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestStore.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestStore.java
new file mode 100644
index 0000000..f719193
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TestStore.java
@@ -0,0 +1,279 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.RecordNumberBinding;
+import com.sleepycat.collections.CurrentTransaction;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.SecondaryConfig;
+
+/**
+ * @author Mark Hayes
+ */
+class TestStore {
+
+ static final TestKeyCreator BYTE_EXTRACTOR = new TestKeyCreator(false);
+ static final TestKeyCreator RECNO_EXTRACTOR = new TestKeyCreator(true);
+ static final EntryBinding VALUE_BINDING = new TestDataBinding();
+ static final EntryBinding BYTE_KEY_BINDING = VALUE_BINDING;
+ static final EntryBinding RECNO_KEY_BINDING = new RecordNumberBinding();
+ static final EntityBinding BYTE_ENTITY_BINDING =
+ new TestEntityBinding(false);
+ static final EntityBinding RECNO_ENTITY_BINDING =
+ new TestEntityBinding(true);
+ static final TestKeyAssigner BYTE_KEY_ASSIGNER =
+ new TestKeyAssigner(false);
+ static final TestKeyAssigner RECNO_KEY_ASSIGNER =
+ new TestKeyAssigner(true);
+
+ static final TestStore BTREE_UNIQ;
+ static final TestStore BTREE_DUP;
+ static final TestStore BTREE_DUPSORT;
+ static final TestStore BTREE_RECNUM;
+ static final TestStore HASH_UNIQ;
+ static final TestStore HASH_DUP;
+ static final TestStore HASH_DUPSORT;
+ static final TestStore QUEUE;
+ static final TestStore RECNO;
+ static final TestStore RECNO_RENUM;
+
+ static final TestStore[] ALL;
+ static {
+ List list = new ArrayList();
+ SecondaryConfig config;
+
+ config = new SecondaryConfig();
+ DbCompat.setTypeBtree(config);
+ BTREE_UNIQ = new TestStore("btree-uniq", config);
+ BTREE_UNIQ.indexOf = BTREE_UNIQ;
+ list.add(BTREE_UNIQ);
+
+ if (DbCompat.INSERTION_ORDERED_DUPLICATES) {
+ config = new SecondaryConfig();
+ DbCompat.setTypeBtree(config);
+ DbCompat.setUnsortedDuplicates(config, true);
+ BTREE_DUP = new TestStore("btree-dup", config);
+ BTREE_DUP.indexOf = null; // indexes must use sorted dups
+ list.add(BTREE_DUP);
+ } else {
+ BTREE_DUP = null;
+ }
+
+ config = new SecondaryConfig();
+ DbCompat.setTypeBtree(config);
+ DbCompat.setSortedDuplicates(config, true);
+ BTREE_DUPSORT = new TestStore("btree-dupsort", config);
+ BTREE_DUPSORT.indexOf = BTREE_UNIQ;
+ list.add(BTREE_DUPSORT);
+
+ if (DbCompat.BTREE_RECNUM_METHOD) {
+ config = new SecondaryConfig();
+ DbCompat.setTypeBtree(config);
+ DbCompat.setBtreeRecordNumbers(config, true);
+ BTREE_RECNUM = new TestStore("btree-recnum", config);
+ BTREE_RECNUM.indexOf = BTREE_RECNUM;
+ list.add(BTREE_RECNUM);
+ } else {
+ BTREE_RECNUM = null;
+ }
+
+ if (DbCompat.HASH_METHOD) {
+ config = new SecondaryConfig();
+ DbCompat.setTypeHash(config);
+ HASH_UNIQ = new TestStore("hash-uniq", config);
+ HASH_UNIQ.indexOf = HASH_UNIQ;
+ list.add(HASH_UNIQ);
+
+ if (DbCompat.INSERTION_ORDERED_DUPLICATES) {
+ config = new SecondaryConfig();
+ DbCompat.setTypeHash(config);
+ DbCompat.setUnsortedDuplicates(config, true);
+ HASH_DUP = new TestStore("hash-dup", config);
+ HASH_DUP.indexOf = null; // indexes must use sorted dups
+ list.add(HASH_DUP);
+ } else {
+ HASH_DUP = null;
+ }
+
+ config = new SecondaryConfig();
+ DbCompat.setTypeHash(config);
+ DbCompat.setSortedDuplicates(config, true);
+ HASH_DUPSORT = new TestStore("hash-dupsort", config);
+ HASH_DUPSORT.indexOf = HASH_UNIQ;
+ list.add(HASH_DUPSORT);
+ } else {
+ HASH_UNIQ = null;
+ HASH_DUP = null;
+ HASH_DUPSORT = null;
+ }
+
+ if (DbCompat.QUEUE_METHOD) {
+ config = new SecondaryConfig();
+ DbCompat.setTypeQueue(config);
+ QUEUE = new TestStore("queue", config);
+ QUEUE.indexOf = QUEUE;
+ list.add(QUEUE);
+ } else {
+ QUEUE = null;
+ }
+
+ if (DbCompat.RECNO_METHOD) {
+ config = new SecondaryConfig();
+ DbCompat.setTypeRecno(config);
+ RECNO = new TestStore("recno", config);
+ RECNO.indexOf = RECNO;
+ list.add(RECNO);
+
+ config = new SecondaryConfig();
+ DbCompat.setTypeRecno(config);
+ DbCompat.setRenumbering(config, true);
+ RECNO_RENUM = new TestStore("recno-renum", config);
+ RECNO_RENUM.indexOf = null; // indexes must have stable keys
+ list.add(RECNO_RENUM);
+ } else {
+ RECNO = null;
+ RECNO_RENUM = null;
+ }
+
+ ALL = new TestStore[list.size()];
+ list.toArray(ALL);
+ }
+
+ private String name;
+ private SecondaryConfig config;
+ private TestStore indexOf;
+ private boolean isRecNumFormat;
+
+ private TestStore(String name, SecondaryConfig config) {
+
+ this.name = name;
+ this.config = config;
+
+ isRecNumFormat = isQueueOrRecno() ||
+ (DbCompat.isTypeBtree(config) &&
+ DbCompat.getBtreeRecordNumbers(config));
+ }
+
+ EntryBinding getValueBinding() {
+
+ return VALUE_BINDING;
+ }
+
+ EntryBinding getKeyBinding() {
+
+ return isRecNumFormat ? RECNO_KEY_BINDING : BYTE_KEY_BINDING;
+ }
+
+ EntityBinding getEntityBinding() {
+
+ return isRecNumFormat ? RECNO_ENTITY_BINDING : BYTE_ENTITY_BINDING;
+ }
+
+ TestKeyAssigner getKeyAssigner() {
+
+ if (isQueueOrRecno()) {
+ return null;
+ } else {
+ if (isRecNumFormat) {
+ return RECNO_KEY_ASSIGNER;
+ } else {
+ return BYTE_KEY_ASSIGNER;
+ }
+ }
+ }
+
+ String getName() {
+
+ return name;
+ }
+
+ boolean isOrdered() {
+
+ return !DbCompat.isTypeHash(config);
+ }
+
+ boolean isQueueOrRecno() {
+
+ return DbCompat.isTypeQueue(config) || DbCompat.isTypeRecno(config);
+ }
+
+ boolean areKeyRangesAllowed() {
+ return isOrdered() && !isQueueOrRecno();
+ }
+
+ boolean areDuplicatesAllowed() {
+
+ return DbCompat.getSortedDuplicates(config) ||
+ DbCompat.getUnsortedDuplicates(config);
+ }
+
+ boolean hasRecNumAccess() {
+
+ return isRecNumFormat;
+ }
+
+ boolean areKeysRenumbered() {
+
+ return hasRecNumAccess() &&
+ (DbCompat.isTypeBtree(config) ||
+ DbCompat.getRenumbering(config));
+ }
+
+ TestStore getIndexOf() {
+
+ return DbCompat.SECONDARIES ? indexOf : null;
+ }
+
+ Database open(Environment env, String fileName)
+ throws DatabaseException {
+
+ int fixedLen = (isQueueOrRecno() ? 1 : 0);
+ return openDb(env, fileName, fixedLen, null);
+ }
+
+ Database openIndex(Database primary, String fileName)
+ throws DatabaseException {
+
+ int fixedLen = (isQueueOrRecno() ? 4 : 0);
+ config.setKeyCreator(isRecNumFormat ? RECNO_EXTRACTOR
+ : BYTE_EXTRACTOR);
+ Environment env = primary.getEnvironment();
+ return openDb(env, fileName, fixedLen, primary);
+ }
+
+ private Database openDb(Environment env, String fileName, int fixedLen,
+ Database primary)
+ throws DatabaseException {
+
+ if (fixedLen > 0) {
+ DbCompat.setRecordLength(config, fixedLen);
+ DbCompat.setRecordPad(config, 0);
+ } else {
+ DbCompat.setRecordLength(config, 0);
+ }
+ config.setAllowCreate(true);
+ DbCompat.setReadUncommitted(config, true);
+ config.setTransactional(CurrentTransaction.getInstance(env) != null);
+ if (primary != null) {
+ return DbCompat.testOpenSecondaryDatabase
+ (env, null, fileName, null, primary, config);
+ } else {
+ return DbCompat.testOpenDatabase
+ (env, null, fileName, null, config);
+ }
+ }
+}
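Putting TestEnv and TestStore together gives the setUp() pattern the tests above repeat: open a Database via one of the TestStore constants and wrap it in a StoredSortedMap with the matching bindings. A sketch of that pattern, assuming the code lives in the same package so the package-private accessors are visible; the store file name is illustrative:

import com.sleepycat.collections.StoredSortedMap;
import com.sleepycat.db.Database;
import com.sleepycat.db.Environment;

class TestStoreUsageSketch {

    // Illustrative sketch, not part of the original test sources.
    // Opens the unique-key btree store and wraps it as a writable sorted map,
    // mirroring the setUp() code used by the tests above.
    static StoredSortedMap openUniqBtreeMap(Environment env) throws Exception {
        Database db = TestStore.BTREE_UNIQ.open(env, "store.db");
        return new StoredSortedMap(db,
                                   TestStore.BTREE_UNIQ.getKeyBinding(),
                                   TestStore.BTREE_UNIQ.getValueBinding(),
                                   true /* writeAllowed */);
    }
}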
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TransactionTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TransactionTest.java
new file mode 100644
index 0000000..a427f2a
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/TransactionTest.java
@@ -0,0 +1,838 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.collections.test;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.collections.CurrentTransaction;
+import com.sleepycat.collections.StoredCollections;
+import com.sleepycat.collections.StoredContainer;
+import com.sleepycat.collections.StoredIterator;
+import com.sleepycat.collections.StoredList;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.Cursor;
+import com.sleepycat.db.CursorConfig;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+import com.sleepycat.db.DeadlockException;
+import com.sleepycat.db.OperationStatus;
+import com.sleepycat.db.Transaction;
+import com.sleepycat.db.TransactionConfig;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class TransactionTest extends TestCase {
+
+ private static final Long ONE = new Long(1);
+ private static final Long TWO = new Long(2);
+ private static final Long THREE = new Long(3);
+
+ /**
+ * Runs a command line collection test.
+ * @see #usage
+ */
+ public static void main(String[] args) {
+ if (args.length == 1 &&
+ (args[0].equals("-h") || args[0].equals("-help"))) {
+ usage();
+ } else {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+ }
+
+ private static void usage() {
+
+ System.out.println(
+ "Usage: java com.sleepycat.collections.test.TransactionTest"
+ + " [-h | -help]\n");
+ System.exit(2);
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite(TransactionTest.class);
+ return suite;
+ }
+
+ private Environment env;
+ private CurrentTransaction currentTxn;
+ private Database store;
+ private StoredSortedMap map;
+ private TestStore testStore = TestStore.BTREE_UNIQ;
+
+ public TransactionTest(String name) {
+
+ super(name);
+ }
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ SharedTestUtils.printTestName(SharedTestUtils.qualifiedTestName(this));
+ env = TestEnv.TXN.open("TransactionTests");
+ currentTxn = CurrentTransaction.getInstance(env);
+ store = testStore.open(env, dbName(0));
+ map = new StoredSortedMap(store, testStore.getKeyBinding(),
+ testStore.getValueBinding(), true);
+ }
+
+ @Override
+ public void tearDown() {
+
+ try {
+ if (store != null) {
+ store.close();
+ }
+ if (env != null) {
+ env.close();
+ }
+ } catch (Exception e) {
+ System.out.println("Ignored exception during tearDown: " + e);
+ } finally {
+ /* Ensure that GC can cleanup. */
+ store = null;
+ env = null;
+ currentTxn = null;
+ map = null;
+ testStore = null;
+ }
+ }
+
+ private String dbName(int i) {
+
+ return "txn-test-" + getName() + '-' + i;
+ }
+
+ public void testGetters()
+ throws Exception {
+
+ assertNotNull(env);
+ assertNotNull(currentTxn);
+ assertNull(currentTxn.getTransaction());
+
+ currentTxn.beginTransaction(null);
+ assertNotNull(currentTxn.getTransaction());
+ currentTxn.commitTransaction();
+ assertNull(currentTxn.getTransaction());
+
+ currentTxn.beginTransaction(null);
+ assertNotNull(currentTxn.getTransaction());
+ currentTxn.abortTransaction();
+ assertNull(currentTxn.getTransaction());
+
+ // read-uncommitted property should be inherited
+
+ assertTrue(!isReadUncommitted(map));
+ assertTrue(!isReadUncommitted(map.values()));
+ assertTrue(!isReadUncommitted(map.keySet()));
+ assertTrue(!isReadUncommitted(map.entrySet()));
+
+ StoredSortedMap other = (StoredSortedMap)
+ StoredCollections.configuredMap
+ (map, CursorConfig.READ_UNCOMMITTED);
+ assertTrue(isReadUncommitted(other));
+ assertTrue(isReadUncommitted(other.values()));
+ assertTrue(isReadUncommitted(other.keySet()));
+ assertTrue(isReadUncommitted(other.entrySet()));
+ assertTrue(!isReadUncommitted(map));
+ assertTrue(!isReadUncommitted(map.values()));
+ assertTrue(!isReadUncommitted(map.keySet()));
+ assertTrue(!isReadUncommitted(map.entrySet()));
+
+ // read-committed property should be inherited
+
+ assertTrue(!isReadCommitted(map));
+ assertTrue(!isReadCommitted(map.values()));
+ assertTrue(!isReadCommitted(map.keySet()));
+ assertTrue(!isReadCommitted(map.entrySet()));
+
+ other = (StoredSortedMap)
+ StoredCollections.configuredMap
+ (map, CursorConfig.READ_COMMITTED);
+ assertTrue(isReadCommitted(other));
+ assertTrue(isReadCommitted(other.values()));
+ assertTrue(isReadCommitted(other.keySet()));
+ assertTrue(isReadCommitted(other.entrySet()));
+ assertTrue(!isReadCommitted(map));
+ assertTrue(!isReadCommitted(map.values()));
+ assertTrue(!isReadCommitted(map.keySet()));
+ assertTrue(!isReadCommitted(map.entrySet()));
+ }
+
+ public void testTransactional()
+ throws Exception {
+
+ // is transactional because DB_AUTO_COMMIT was passed to
+ // Database.open()
+ //
+ assertTrue(map.isTransactional());
+ store.close();
+ store = null;
+
+ // is not transactional
+ //
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ DbCompat.setTypeBtree(dbConfig);
+ dbConfig.setAllowCreate(true);
+ Database db = DbCompat.testOpenDatabase
+ (env, null, dbName(1), null, dbConfig);
+ map = new StoredSortedMap(db, testStore.getKeyBinding(),
+ testStore.getValueBinding(), true);
+ assertTrue(!map.isTransactional());
+ map.put(ONE, ONE);
+ readCheck(map, ONE, ONE);
+ db.close();
+
+ // is transactional
+ //
+ dbConfig.setTransactional(true);
+ currentTxn.beginTransaction(null);
+ db = DbCompat.testOpenDatabase
+ (env, currentTxn.getTransaction(), dbName(2), null, dbConfig);
+ currentTxn.commitTransaction();
+ map = new StoredSortedMap(db, testStore.getKeyBinding(),
+ testStore.getValueBinding(), true);
+ assertTrue(map.isTransactional());
+ currentTxn.beginTransaction(null);
+ map.put(ONE, ONE);
+ readCheck(map, ONE, ONE);
+ currentTxn.commitTransaction();
+ db.close();
+ }
+
+ public void testExceptions()
+ throws Exception {
+
+ try {
+ currentTxn.commitTransaction();
+ fail();
+ } catch (IllegalStateException expected) {}
+
+ try {
+ currentTxn.abortTransaction();
+ fail();
+ } catch (IllegalStateException expected) {}
+ }
+
+ public void testNested()
+ throws Exception {
+
+ if (!DbCompat.NESTED_TRANSACTIONS) {
+ return;
+ }
+ assertNull(currentTxn.getTransaction());
+
+ Transaction txn1 = currentTxn.beginTransaction(null);
+ assertNotNull(txn1);
+ assertTrue(txn1 == currentTxn.getTransaction());
+
+ assertNull(map.get(ONE));
+ assertNull(map.put(ONE, ONE));
+ assertEquals(ONE, map.get(ONE));
+
+ Transaction txn2 = currentTxn.beginTransaction(null);
+ assertNotNull(txn2);
+ assertTrue(txn2 == currentTxn.getTransaction());
+ assertTrue(txn1 != txn2);
+
+ assertNull(map.put(TWO, TWO));
+ assertEquals(TWO, map.get(TWO));
+
+ Transaction txn3 = currentTxn.beginTransaction(null);
+ assertNotNull(txn3);
+ assertTrue(txn3 == currentTxn.getTransaction());
+ assertTrue(txn1 != txn2);
+ assertTrue(txn1 != txn3);
+ assertTrue(txn2 != txn3);
+
+ assertNull(map.put(THREE, THREE));
+ assertEquals(THREE, map.get(THREE));
+
+ Transaction txn = currentTxn.abortTransaction();
+ assertTrue(txn == txn2);
+ assertTrue(txn == currentTxn.getTransaction());
+ assertNull(map.get(THREE));
+ assertEquals(TWO, map.get(TWO));
+
+ txn3 = currentTxn.beginTransaction(null);
+ assertNotNull(txn3);
+ assertTrue(txn3 == currentTxn.getTransaction());
+ assertTrue(txn1 != txn2);
+ assertTrue(txn1 != txn3);
+ assertTrue(txn2 != txn3);
+
+ assertNull(map.put(THREE, THREE));
+ assertEquals(THREE, map.get(THREE));
+
+ txn = currentTxn.commitTransaction();
+ assertTrue(txn == txn2);
+ assertTrue(txn == currentTxn.getTransaction());
+ assertEquals(THREE, map.get(THREE));
+ assertEquals(TWO, map.get(TWO));
+
+ txn = currentTxn.commitTransaction();
+ assertTrue(txn == txn1);
+ assertTrue(txn == currentTxn.getTransaction());
+ assertEquals(THREE, map.get(THREE));
+ assertEquals(TWO, map.get(TWO));
+ assertEquals(ONE, map.get(ONE));
+
+ txn = currentTxn.commitTransaction();
+ assertNull(txn);
+ assertNull(currentTxn.getTransaction());
+ assertEquals(THREE, map.get(THREE));
+ assertEquals(TWO, map.get(TWO));
+ assertEquals(ONE, map.get(ONE));
+ }
+
+ public void testRunnerCommit()
+ throws Exception {
+
+ commitTest(false);
+ }
+
+ public void testExplicitCommit()
+ throws Exception {
+
+ commitTest(true);
+ }
+
+ private void commitTest(final boolean explicit)
+ throws Exception {
+
+ final TransactionRunner runner = new TransactionRunner(env);
+ runner.setAllowNestedTransactions(DbCompat.NESTED_TRANSACTIONS);
+
+ assertNull(currentTxn.getTransaction());
+
+ runner.run(new TransactionWorker() {
+ public void doWork() throws Exception {
+ final Transaction txn1 = currentTxn.getTransaction();
+ assertNotNull(txn1);
+ assertNull(map.put(ONE, ONE));
+ assertEquals(ONE, map.get(ONE));
+
+ runner.run(new TransactionWorker() {
+ public void doWork() throws Exception {
+ final Transaction txn2 = currentTxn.getTransaction();
+ assertNotNull(txn2);
+ if (DbCompat.NESTED_TRANSACTIONS) {
+ assertTrue(txn1 != txn2);
+ } else {
+ assertTrue(txn1 == txn2);
+ }
+ assertNull(map.put(TWO, TWO));
+ assertEquals(TWO, map.get(TWO));
+ assertEquals(ONE, map.get(ONE));
+ if (DbCompat.NESTED_TRANSACTIONS && explicit) {
+ currentTxn.commitTransaction();
+ }
+ }
+ });
+
+ Transaction txn3 = currentTxn.getTransaction();
+ assertSame(txn1, txn3);
+
+ assertEquals(TWO, map.get(TWO));
+ assertEquals(ONE, map.get(ONE));
+ }
+ });
+
+ assertNull(currentTxn.getTransaction());
+ }
+
+ public void testRunnerAbort()
+ throws Exception {
+
+ abortTest(false);
+ }
+
+ public void testExplicitAbort()
+ throws Exception {
+
+ abortTest(true);
+ }
+
+ private void abortTest(final boolean explicit)
+ throws Exception {
+
+ final TransactionRunner runner = new TransactionRunner(env);
+ runner.setAllowNestedTransactions(DbCompat.NESTED_TRANSACTIONS);
+
+ assertNull(currentTxn.getTransaction());
+
+ runner.run(new TransactionWorker() {
+ public void doWork() throws Exception {
+ final Transaction txn1 = currentTxn.getTransaction();
+ assertNotNull(txn1);
+ assertNull(map.put(ONE, ONE));
+ assertEquals(ONE, map.get(ONE));
+
+ if (DbCompat.NESTED_TRANSACTIONS) {
+ try {
+ runner.run(new TransactionWorker() {
+ public void doWork() throws Exception {
+ final Transaction txn2 =
+ currentTxn.getTransaction();
+ assertNotNull(txn2);
+ assertTrue(txn1 != txn2);
+ assertNull(map.put(TWO, TWO));
+ assertEquals(TWO, map.get(TWO));
+ if (explicit) {
+ currentTxn.abortTransaction();
+ } else {
+ throw new IllegalArgumentException(
+ "test-abort");
+ }
+ }
+ });
+ assertTrue(explicit);
+ } catch (IllegalArgumentException e) {
+ assertTrue(!explicit);
+ assertEquals("test-abort", e.getMessage());
+ }
+ }
+
+ Transaction txn3 = currentTxn.getTransaction();
+ assertSame(txn1, txn3);
+
+ assertEquals(ONE, map.get(ONE));
+ assertNull(map.get(TWO));
+ }
+ });
+
+ assertNull(currentTxn.getTransaction());
+ }
+
+ public void testReadCommittedCollection()
+ throws Exception {
+
+ StoredSortedMap degree2Map = (StoredSortedMap)
+ StoredCollections.configuredSortedMap
+ (map, CursorConfig.READ_COMMITTED);
+
+ // original map is not read-committed
+ assertTrue(!isReadCommitted(map));
+
+        // all read-committed containers are read-committed
+ assertTrue(isReadCommitted(degree2Map));
+ assertTrue(isReadCommitted
+ (StoredCollections.configuredMap
+ (map, CursorConfig.READ_COMMITTED)));
+ assertTrue(isReadCommitted
+ (StoredCollections.configuredCollection
+ (map.values(), CursorConfig.READ_COMMITTED)));
+ assertTrue(isReadCommitted
+ (StoredCollections.configuredSet
+ (map.keySet(), CursorConfig.READ_COMMITTED)));
+ assertTrue(isReadCommitted
+ (StoredCollections.configuredSortedSet
+ ((SortedSet) map.keySet(),
+ CursorConfig.READ_COMMITTED)));
+
+ if (DbCompat.RECNO_METHOD) {
+ // create a list just so we can call configuredList()
+ Database listStore = TestStore.RECNO_RENUM.open(env, "foo");
+ List list = new StoredList(listStore, TestStore.VALUE_BINDING,
+ true);
+ assertTrue(isReadCommitted
+ (StoredCollections.configuredList
+ (list, CursorConfig.READ_COMMITTED)));
+ listStore.close();
+ }
+
+ map.put(ONE, ONE);
+ doReadCommitted(degree2Map, null);
+ }
+
+ private static boolean isReadCommitted(Object container) {
+ StoredContainer storedContainer = (StoredContainer) container;
+        /* We can't use getReadCommitted until it is added to DB core. */
+ return storedContainer.getCursorConfig() != null &&
+ storedContainer.getCursorConfig().getReadCommitted();
+ }
+
+ public void testReadCommittedTransaction()
+ throws Exception {
+
+ TransactionConfig config = new TransactionConfig();
+ config.setReadCommitted(true);
+ doReadCommitted(map, config);
+ }
+
+ private void doReadCommitted(final StoredSortedMap degree2Map,
+ TransactionConfig txnConfig)
+ throws Exception {
+
+ map.put(ONE, ONE);
+ TransactionRunner runner = new TransactionRunner(env);
+ runner.setTransactionConfig(txnConfig);
+ assertNull(currentTxn.getTransaction());
+ runner.run(new TransactionWorker() {
+ public void doWork() throws Exception {
+ assertNotNull(currentTxn.getTransaction());
+
+ /* Do a read-committed get(), the lock is not retained. */
+ assertEquals(ONE, degree2Map.get(ONE));
+
+ /*
+ * If we were not using read-committed, the following write of
+ * key ONE with an auto-commit transaction would self-deadlock
+ * since two transactions in the same thread would be
+ * attempting to lock the same key, one for write and one for
+ * read. This test passes if we do not deadlock.
+ */
+ DatabaseEntry key = new DatabaseEntry();
+ DatabaseEntry value = new DatabaseEntry();
+ testStore.getKeyBinding().objectToEntry(ONE, key);
+ testStore.getValueBinding().objectToEntry(TWO, value);
+ store.put(null, key, value);
+ }
+ });
+ assertNull(currentTxn.getTransaction());
+ }
+
+ public void testReadUncommittedCollection()
+ throws Exception {
+
+ StoredSortedMap dirtyMap = (StoredSortedMap)
+ StoredCollections.configuredSortedMap
+ (map, CursorConfig.READ_UNCOMMITTED);
+
+ // original map is not read-uncommitted
+ assertTrue(!isReadUncommitted(map));
+
+ // all read-uncommitted containers are read-uncommitted
+ assertTrue(isReadUncommitted(dirtyMap));
+ assertTrue(isReadUncommitted
+ (StoredCollections.configuredMap
+ (map, CursorConfig.READ_UNCOMMITTED)));
+ assertTrue(isReadUncommitted
+ (StoredCollections.configuredCollection
+ (map.values(), CursorConfig.READ_UNCOMMITTED)));
+ assertTrue(isReadUncommitted
+ (StoredCollections.configuredSet
+ (map.keySet(), CursorConfig.READ_UNCOMMITTED)));
+ assertTrue(isReadUncommitted
+ (StoredCollections.configuredSortedSet
+ ((SortedSet) map.keySet(), CursorConfig.READ_UNCOMMITTED)));
+
+ if (DbCompat.RECNO_METHOD) {
+ // create a list just so we can call configuredList()
+ Database listStore = TestStore.RECNO_RENUM.open(env, "foo");
+ List list = new StoredList(listStore, TestStore.VALUE_BINDING,
+ true);
+ assertTrue(isReadUncommitted
+ (StoredCollections.configuredList
+ (list, CursorConfig.READ_UNCOMMITTED)));
+ listStore.close();
+ }
+
+ doReadUncommitted(dirtyMap);
+ }
+
+ private static boolean isReadUncommitted(Object container) {
+ StoredContainer storedContainer = (StoredContainer) container;
+ return storedContainer.getCursorConfig() != null &&
+ storedContainer.getCursorConfig().getReadUncommitted();
+ }
+
+ public void testReadUncommittedTransaction()
+ throws Exception {
+
+ TransactionRunner runner = new TransactionRunner(env);
+ TransactionConfig config = new TransactionConfig();
+ config.setReadUncommitted(true);
+ runner.setTransactionConfig(config);
+ assertNull(currentTxn.getTransaction());
+ runner.run(new TransactionWorker() {
+ public void doWork() throws Exception {
+ assertNotNull(currentTxn.getTransaction());
+ doReadUncommitted(map);
+ }
+ });
+ assertNull(currentTxn.getTransaction());
+ }
+
+ /**
+ * Tests that the CurrentTransaction static WeakHashMap does indeed allow
+     * GC to reclaim the environment when it is closed. At one point this was
+ * not working because the value object in the map has a reference to the
+ * environment. This was fixed by wrapping the Environment in a
+ * WeakReference. [#15444]
+ *
+ * This test only succeeds intermittently, probably due to its reliance
+ * on the GC call.
+ */
+ public void testCurrentTransactionGC()
+ throws Exception {
+
+ /*
+ * This test can have indeterminate results because it depends on
+ * a finalize count, so it's not part of the default run.
+ */
+ if (!SharedTestUtils.runLongTests()) {
+ return;
+ }
+
+ final StringBuffer finalizedFlag = new StringBuffer();
+
+ class MyEnv extends Environment {
+
+ /**
+ * @throws FileNotFoundException from DB core.
+ */
+ MyEnv(File home, EnvironmentConfig config)
+ throws DatabaseException, FileNotFoundException {
+
+ super(home, config);
+ }
+
+ @Override
+ protected void finalize() {
+ finalizedFlag.append('.');
+ }
+ }
+
+ MyEnv myEnv = new MyEnv(env.getHome(), env.getConfig());
+ CurrentTransaction myCurrTxn = CurrentTransaction.getInstance(myEnv);
+
+ store.close();
+ store = null;
+ map = null;
+
+ env.close();
+ env = null;
+
+ myEnv.close();
+ myEnv = null;
+
+ myCurrTxn = null;
+ currentTxn = null;
+
+ for (int i = 0; i < 10; i += 1) {
+ byte[] x = null;
+ try {
+ x = new byte[Integer.MAX_VALUE - 1];
+ } catch (OutOfMemoryError expected) {
+ }
+ assertNull(x);
+ System.gc();
+ }
+
+ for (int i = 0; i < 10; i += 1) {
+ System.gc();
+ }
+
+ assertTrue(finalizedFlag.length() > 0);
+ }
+
+ private synchronized void doReadUncommitted(StoredSortedMap dirtyMap)
+ throws Exception {
+
+ // start thread one
+ ReadUncommittedThreadOne t1 = new ReadUncommittedThreadOne(env, this);
+ t1.start();
+ wait();
+
+ // put ONE
+ synchronized (t1) { t1.notify(); }
+ wait();
+ readCheck(dirtyMap, ONE, ONE);
+ assertTrue(!dirtyMap.isEmpty());
+
+ // abort ONE
+ synchronized (t1) { t1.notify(); }
+ t1.join();
+ readCheck(dirtyMap, ONE, null);
+ assertTrue(dirtyMap.isEmpty());
+
+ // start thread two
+ ReadUncommittedThreadTwo t2 = new ReadUncommittedThreadTwo(env, this);
+ t2.start();
+ wait();
+
+ // put TWO
+ synchronized (t2) { t2.notify(); }
+ wait();
+ readCheck(dirtyMap, TWO, TWO);
+ assertTrue(!dirtyMap.isEmpty());
+
+ // commit TWO
+ synchronized (t2) { t2.notify(); }
+ t2.join();
+ readCheck(dirtyMap, TWO, TWO);
+ assertTrue(!dirtyMap.isEmpty());
+ }
+
+ private static class ReadUncommittedThreadOne extends Thread {
+
+ private final CurrentTransaction currentTxn;
+ private final TransactionTest parent;
+ private final StoredSortedMap map;
+
+ private ReadUncommittedThreadOne(Environment env,
+ TransactionTest parent) {
+
+ this.currentTxn = CurrentTransaction.getInstance(env);
+ this.parent = parent;
+ this.map = parent.map;
+ }
+
+ @Override
+ public synchronized void run() {
+
+ try {
+ assertNull(currentTxn.getTransaction());
+ assertNotNull(currentTxn.beginTransaction(null));
+ assertNotNull(currentTxn.getTransaction());
+ readCheck(map, ONE, null);
+ synchronized (parent) { parent.notify(); }
+ wait();
+
+ // put ONE
+ assertNull(map.put(ONE, ONE));
+ readCheck(map, ONE, ONE);
+ synchronized (parent) { parent.notify(); }
+ wait();
+
+ // abort ONE
+ assertNull(currentTxn.abortTransaction());
+ assertNull(currentTxn.getTransaction());
+ } catch (Exception e) {
+ throw new RuntimeExceptionWrapper(e);
+ }
+ }
+ }
+
+ private static class ReadUncommittedThreadTwo extends Thread {
+
+ private final Environment env;
+ private final CurrentTransaction currentTxn;
+ private final TransactionTest parent;
+ private final StoredSortedMap map;
+
+ private ReadUncommittedThreadTwo(Environment env,
+ TransactionTest parent) {
+
+ this.env = env;
+ this.currentTxn = CurrentTransaction.getInstance(env);
+ this.parent = parent;
+ this.map = parent.map;
+ }
+
+ @Override
+ public synchronized void run() {
+
+ try {
+ final TransactionRunner runner = new TransactionRunner(env);
+ final Object thread = this;
+ assertNull(currentTxn.getTransaction());
+
+ runner.run(new TransactionWorker() {
+ public void doWork() throws Exception {
+ assertNotNull(currentTxn.getTransaction());
+ readCheck(map, TWO, null);
+ synchronized (parent) { parent.notify(); }
+ thread.wait();
+
+ // put TWO
+ assertNull(map.put(TWO, TWO));
+ readCheck(map, TWO, TWO);
+ synchronized (parent) { parent.notify(); }
+ thread.wait();
+
+ // commit TWO
+ }
+ });
+ assertNull(currentTxn.getTransaction());
+ } catch (Exception e) {
+ throw new RuntimeExceptionWrapper(e);
+ }
+ }
+ }
+
+ private static void readCheck(StoredSortedMap checkMap, Object key,
+ Object expect) {
+ if (expect == null) {
+ assertNull(checkMap.get(key));
+ assertTrue(checkMap.tailMap(key).isEmpty());
+ assertTrue(!checkMap.tailMap(key).containsKey(key));
+ assertTrue(!checkMap.keySet().contains(key));
+ assertTrue(checkMap.duplicates(key).isEmpty());
+ Iterator i = checkMap.keySet().iterator();
+ try {
+ while (i.hasNext()) {
+ assertTrue(!key.equals(i.next()));
+ }
+ } finally { StoredIterator.close(i); }
+ } else {
+ assertEquals(expect, checkMap.get(key));
+ assertEquals(expect, checkMap.tailMap(key).get(key));
+ assertTrue(!checkMap.tailMap(key).isEmpty());
+ assertTrue(checkMap.tailMap(key).containsKey(key));
+ assertTrue(checkMap.keySet().contains(key));
+ assertTrue(checkMap.values().contains(expect));
+ assertTrue(!checkMap.duplicates(key).isEmpty());
+ assertTrue(checkMap.duplicates(key).contains(expect));
+ Iterator i = checkMap.keySet().iterator();
+ try {
+ boolean found = false;
+ while (i.hasNext()) {
+ if (expect.equals(i.next())) {
+ found = true;
+ }
+ }
+ assertTrue(found);
+            } finally { StoredIterator.close(i); }
+ }
+ }
+
+ /**
+     * Tests transaction retries performed by TransactionRunner.
+ *
+ * This test is too sensitive to how lock conflict detection works on JE to
+ * make it work properly on DB core.
+ */
+
+}
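
The nested-transaction behaviour exercised by testNested() and commitTest() above reduces to a simple begin/commit pairing on CurrentTransaction. The sketch below is an illustration only, assuming DbCompat.NESTED_TRANSACTIONS is true and that env is an already-open transactional Environment.

    // Hedged sketch: manual nesting with CurrentTransaction, mirroring testNested().
    CurrentTransaction ct = CurrentTransaction.getInstance(env);
    Transaction outer = ct.beginTransaction(null);   // becomes the current transaction
    Transaction inner = ct.beginTransaction(null);   // child of 'outer'
    // ... perform StoredMap operations under 'inner' ...
    ct.abortTransaction();                           // undoes only the child's writes
    ct.commitTransaction();                          // commits 'outer'; no transaction remains current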
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java
new file mode 100644
index 0000000..797a0bb
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java
@@ -0,0 +1,97 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+package com.sleepycat.collections.test.serial;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.Environment;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class CatalogCornerCaseTest extends TestCase {
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ return new TestSuite(CatalogCornerCaseTest.class);
+ }
+
+ private Environment env;
+
+ public CatalogCornerCaseTest(String name) {
+
+ super(name);
+ }
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ SharedTestUtils.printTestName(getName());
+ env = TestEnv.BDB.open(getName());
+ }
+
+ @Override
+ public void tearDown() {
+
+ try {
+ if (env != null) {
+ env.close();
+ }
+ } catch (Exception e) {
+ System.out.println("Ignored exception during tearDown: " + e);
+ } finally {
+ /* Ensure that GC can cleanup. */
+ env = null;
+ }
+ }
+
+ public void testReadOnlyEmptyCatalog()
+ throws Exception {
+
+ String file = "catalog.db";
+
+ /* Create an empty database. */
+ DatabaseConfig config = new DatabaseConfig();
+ config.setAllowCreate(true);
+ DbCompat.setTypeBtree(config);
+ Database db =
+ DbCompat.testOpenDatabase(env, null, file, null, config);
+ db.close();
+
+ /* Open the empty database read-only. */
+ config.setAllowCreate(false);
+ config.setReadOnly(true);
+ db = DbCompat.testOpenDatabase(env, null, file, null, config);
+
+ /* Expect exception when creating the catalog. */
+ try {
+ new StoredClassCatalog(db);
+ fail();
+ } catch (IllegalStateException e) { }
+ db.close();
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java
new file mode 100644
index 0000000..f7a8c3f
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java
@@ -0,0 +1,177 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+package com.sleepycat.collections.test.serial;
+
+import java.io.ObjectStreamClass;
+import java.util.Map;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.collections.StoredMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.Environment;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * Runs part two of the StoredClassCatalogTest. This part is run with the
+ * new/updated version of TestSerial in the classpath. It uses the
+ * environment and databases created by StoredClassCatalogTestInit. It
+ * verifies that it can read objects serialized using the old class format,
+ * and that it can create new objects with the new class format.
+ *
+ * @author Mark Hayes
+ */
+public class StoredClassCatalogTest extends TestCase
+ implements TransactionWorker {
+
+ static final String CATALOG_FILE = "catalogtest-catalog.db";
+ static final String STORE_FILE = "catalogtest-store.db";
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite();
+ for (int i = 0; i < TestEnv.ALL.length; i += 1) {
+ suite.addTest(new StoredClassCatalogTest(TestEnv.ALL[i]));
+ }
+ return suite;
+ }
+
+ private TestEnv testEnv;
+ private Environment env;
+ private StoredClassCatalog catalog;
+ private StoredClassCatalog catalog2;
+ private Database store;
+ private Map map;
+ private TransactionRunner runner;
+
+ public StoredClassCatalogTest(TestEnv testEnv) {
+
+ super(makeTestName(testEnv));
+ this.testEnv = testEnv;
+ }
+
+ static String makeTestName(TestEnv testEnv) {
+ return "StoredClassCatalogTest-" + testEnv.getName();
+ }
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ SharedTestUtils.printTestName(getName());
+ env = testEnv.open(makeTestName(testEnv), false);
+ runner = new TransactionRunner(env);
+
+ catalog = new StoredClassCatalog(openDb(CATALOG_FILE, false));
+ catalog2 = new StoredClassCatalog(openDb("catalog2.db", true));
+
+ SerialBinding keyBinding = new SerialBinding(catalog,
+ String.class);
+ SerialBinding valueBinding = new SerialBinding(catalog,
+ TestSerial.class);
+ store = openDb(STORE_FILE, false);
+
+ map = new StoredMap(store, keyBinding, valueBinding, true);
+ }
+
+ private Database openDb(String file, boolean create)
+ throws Exception {
+
+ DatabaseConfig config = new DatabaseConfig();
+ DbCompat.setTypeBtree(config);
+ config.setTransactional(testEnv.isTxnMode());
+ config.setAllowCreate(create);
+
+ return DbCompat.testOpenDatabase(env, null, file, null, config);
+ }
+
+ @Override
+ public void tearDown() {
+
+ try {
+ if (catalog != null) {
+ catalog.close();
+ catalog.close(); // should have no effect
+ }
+ if (catalog2 != null) {
+ catalog2.close();
+ }
+ if (store != null) {
+ store.close();
+ }
+ if (env != null) {
+ env.close();
+ }
+ } catch (Exception e) {
+ System.err.println("Ignored exception during tearDown: ");
+ e.printStackTrace();
+ } finally {
+ /* Ensure that GC can cleanup. */
+ catalog = null;
+ catalog2 = null;
+ store = null;
+ env = null;
+ testEnv = null;
+ map = null;
+ runner = null;
+ }
+ }
+
+ @Override
+ public void runTest()
+ throws Exception {
+
+ runner.run(this);
+ }
+
+ public void doWork()
+ throws Exception {
+
+ TestSerial one = (TestSerial) map.get("one");
+ TestSerial two = (TestSerial) map.get("two");
+ assertNotNull(one);
+ assertNotNull(two);
+ assertEquals(one, two.getOther());
+ assertNull(one.getStringField());
+ assertNull(two.getStringField());
+
+ TestSerial three = new TestSerial(two);
+ assertNotNull(three.getStringField());
+ map.put("three", three);
+ three = (TestSerial) map.get("three");
+ assertEquals(two, three.getOther());
+
+ ObjectStreamClass desc = ObjectStreamClass.lookup(TestSerial.class);
+
+ assertNotNull(catalog.getClassID(desc));
+ assertNotNull(catalog.getClassID(desc));
+
+ // test with empty catalog
+ assertNotNull(catalog2.getClassID(desc));
+ assertNotNull(catalog2.getClassID(desc));
+ }
+}
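
As setUp() above shows, the essential wiring is a StoredClassCatalog feeding SerialBinding instances for a StoredMap. A hedged, stand-alone restatement of that wiring follows; the database file names are illustrative and env is assumed to be an open Environment.

    // Hedged sketch: catalog + serial bindings + StoredMap, as in setUp() above.
    DatabaseConfig config = new DatabaseConfig();
    DbCompat.setTypeBtree(config);
    config.setAllowCreate(true);
    Database catalogDb = DbCompat.testOpenDatabase(env, null, "catalog.db", null, config);
    StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);
    SerialBinding keyBinding = new SerialBinding(catalog, String.class);
    SerialBinding valueBinding = new SerialBinding(catalog, TestSerial.class);
    Database storeDb = DbCompat.testOpenDatabase(env, null, "store.db", null, config);
    Map map = new StoredMap(storeDb, keyBinding, valueBinding, true);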
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java
new file mode 100644
index 0000000..96ad345
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java
@@ -0,0 +1,154 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+package com.sleepycat.collections.test.serial;
+
+import java.util.Map;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.collections.StoredMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.Environment;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * Runs part one of the StoredClassCatalogTest. This part is run with the
+ * old/original version of TestSerial in the classpath. It creates a fresh
+ * environment and databases containing serialized versions of the old class.
+ * When StoredClassCatalogTest is run, it will read these objects from the
+ * database created here.
+ *
+ * @author Mark Hayes
+ */
+public class StoredClassCatalogTestInit extends TestCase
+ implements TransactionWorker {
+
+ static final String CATALOG_FILE = StoredClassCatalogTest.CATALOG_FILE;
+ static final String STORE_FILE = StoredClassCatalogTest.STORE_FILE;
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite();
+ for (int i = 0; i < TestEnv.ALL.length; i += 1) {
+ suite.addTest(new StoredClassCatalogTestInit(TestEnv.ALL[i]));
+ }
+ return suite;
+ }
+
+ private TestEnv testEnv;
+ private Environment env;
+ private StoredClassCatalog catalog;
+ private Database store;
+ private Map map;
+ private TransactionRunner runner;
+
+ public StoredClassCatalogTestInit(TestEnv testEnv) {
+
+ super("StoredClassCatalogTestInit-" + testEnv.getName());
+ this.testEnv = testEnv;
+ }
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ SharedTestUtils.printTestName(getName());
+ env = testEnv.open(StoredClassCatalogTest.makeTestName(testEnv));
+ runner = new TransactionRunner(env);
+
+ catalog = new StoredClassCatalog(openDb(CATALOG_FILE));
+
+ SerialBinding keyBinding = new SerialBinding(catalog, String.class);
+ SerialBinding valueBinding =
+ new SerialBinding(catalog, TestSerial.class);
+ store = openDb(STORE_FILE);
+
+ map = new StoredMap(store, keyBinding, valueBinding, true);
+ }
+
+ private Database openDb(String file)
+ throws Exception {
+
+ DatabaseConfig config = new DatabaseConfig();
+ DbCompat.setTypeBtree(config);
+ config.setTransactional(testEnv.isTxnMode());
+ config.setAllowCreate(true);
+
+ return DbCompat.testOpenDatabase(env, null, file, null, config);
+ }
+
+ @Override
+ public void tearDown() {
+
+ try {
+ if (catalog != null) {
+ catalog.close();
+ catalog.close(); // should have no effect
+ }
+ if (store != null) {
+ store.close();
+ }
+ if (env != null) {
+ env.close();
+ }
+ } catch (Exception e) {
+ System.err.println("Ignored exception during tearDown: ");
+ e.printStackTrace();
+ } finally {
+ /* Ensure that GC can cleanup. */
+ catalog = null;
+ store = null;
+ env = null;
+ testEnv = null;
+ map = null;
+ runner = null;
+ }
+ }
+
+ @Override
+ public void runTest()
+ throws Exception {
+
+ runner.run(this);
+ }
+
+ public void doWork() {
+ TestSerial one = new TestSerial(null);
+ TestSerial two = new TestSerial(one);
+ assertNull("Likely the classpath contains the wrong version of the" +
+ " TestSerial class, the 'original' version is required",
+ one.getStringField());
+ assertNull(two.getStringField());
+ map.put("one", one);
+ map.put("two", two);
+ one = (TestSerial) map.get("one");
+ two = (TestSerial) map.get("two");
+ assertEquals(one, two.getOther());
+ assertNull(one.getStringField());
+ assertNull(two.getStringField());
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java
new file mode 100644
index 0000000..df13cf4
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java
@@ -0,0 +1,70 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+package com.sleepycat.collections.test.serial;
+
+/**
+ * @see StoredClassCatalogTest
+ * @author Mark Hayes
+ */
+class TestSerial implements java.io.Serializable {
+
+ static final long serialVersionUID = -3738980000390384920L;
+
+ private int i = 123;
+ private TestSerial other;
+
+ // The following field 's' was added after this class was compiled and
+ // serialized instances were saved in resource files. This allows testing
+ // that the original stored instances can be deserialized after changing
+ // the class. The serialVersionUID is needed for this according to Java
+ // serialization rules, and was generated with the serialver tool.
+ //
+ private String s = "string";
+
+ TestSerial(TestSerial other) {
+
+ this.other = other;
+ }
+
+ TestSerial getOther() {
+
+ return other;
+ }
+
+ int getIntField() {
+
+ return i;
+ }
+
+ String getStringField() {
+
+ return s; // this returned null before field 's' was added.
+ }
+
+ public boolean equals(Object object) {
+
+ try {
+ TestSerial o = (TestSerial) object;
+ if ((o.other == null) ? (this.other != null)
+ : (!o.other.equals(this.other))) {
+ return false;
+ }
+ if (this.i != o.i) {
+ return false;
+ }
+ // the following test was not done before field 's' was added
+ if ((o.s == null) ? (this.s != null)
+ : (!o.s.equals(this.s))) {
+ return false;
+ }
+ return true;
+ } catch (ClassCastException e) {
+ return false;
+ }
+ }
+}
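
The comment on field 's' above is the heart of the class-evolution check: because both versions of TestSerial declare the same serialVersionUID, instances written by the original class (which had no 's' field) still deserialize under this class, with 's' left null. A hedged sketch of the observable difference, where map stands in for the store populated by StoredClassCatalogTestInit:

    // Hedged sketch: old-format objects come back with a null string field,
    // while objects created by the new class carry the default "string" value.
    TestSerial fromOldStore = (TestSerial) map.get("one");   // written by the original class
    assert fromOldStore.getStringField() == null;
    TestSerial freshlyCreated = new TestSerial(null);        // built with the new class
    assert "string".equals(freshlyCreated.getStringField());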
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java.original b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java.original
new file mode 100644
index 0000000..7eef274
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/TestSerial.java.original
@@ -0,0 +1,72 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+package com.sleepycat.collections.test.serial;
+
+/**
+ * @see StoredClassCatalogTest
+ * @author Mark Hayes
+ */
+class TestSerial implements java.io.Serializable
+{
+ static final long serialVersionUID = -3738980000390384920L;
+
+ private int i = 123;
+ private TestSerial other;
+
+ // The following field 's' was added after this class was compiled and
+ // serialized instances were saved in resource files. This allows testing
+ // that the original stored instances can be deserialized after changing
+ // the class. The serialVersionUID is needed for this according to Java
+ // serialization rules, and was generated with the serialver tool.
+ //
+ //private String s = "string";
+
+ TestSerial(TestSerial other)
+ {
+ this.other = other;
+ }
+
+ TestSerial getOther()
+ {
+ return other;
+ }
+
+ int getIntField()
+ {
+ return i;
+ }
+
+ String getStringField()
+ {
+ return null; // this returned null before field 's' was added.
+ }
+
+ public boolean equals(Object object)
+ {
+ try
+ {
+ TestSerial o = (TestSerial) object;
+ if ((o.other == null) ? (this.other != null)
+ : (!o.other.equals(this.other)))
+ return false;
+ if (this.i != o.i)
+ return false;
+ // the following test was not done before field 's' was added
+ /*
+ if ((o.s == null) ? (this.s != null)
+ : (!o.s.equals(this.s)))
+ return false;
+ */
+ return true;
+ }
+ catch (ClassCastException e)
+ {
+ return false;
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java
new file mode 100644
index 0000000..d13f851
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java
@@ -0,0 +1,245 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+package com.sleepycat.collections.test.serial;
+
+import java.util.Map;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.serial.test.MarshalledObject;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.collections.TupleSerialFactory;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.ForeignKeyDeleteAction;
+import com.sleepycat.db.SecondaryConfig;
+import com.sleepycat.db.SecondaryDatabase;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class TupleSerialFactoryTest extends TestCase
+ implements TransactionWorker {
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite();
+ for (int i = 0; i < TestEnv.ALL.length; i += 1) {
+ for (int sorted = 0; sorted < 2; sorted += 1) {
+ suite.addTest(new TupleSerialFactoryTest(TestEnv.ALL[i],
+ sorted != 0));
+ }
+ }
+ return suite;
+ }
+
+ private TestEnv testEnv;
+ private Environment env;
+ private StoredClassCatalog catalog;
+ private TransactionRunner runner;
+ private TupleSerialFactory factory;
+ private Database store1;
+ private Database store2;
+ private SecondaryDatabase index1;
+ private SecondaryDatabase index2;
+ private final boolean isSorted;
+ private Map storeMap1;
+ private Map storeMap2;
+ private Map indexMap1;
+ private Map indexMap2;
+
+ public TupleSerialFactoryTest(TestEnv testEnv, boolean isSorted) {
+
+ super(null);
+
+ this.testEnv = testEnv;
+ this.isSorted = isSorted;
+
+ String name = "TupleSerialFactoryTest-" + testEnv.getName();
+ name += isSorted ? "-sorted" : "-unsorted";
+ setName(name);
+ }
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ SharedTestUtils.printTestName(getName());
+ env = testEnv.open(getName());
+ runner = new TransactionRunner(env);
+
+ createDatabase();
+ }
+
+ @Override
+ public void tearDown() {
+
+ try {
+ if (index1 != null) {
+ index1.close();
+ }
+ if (index2 != null) {
+ index2.close();
+ }
+ if (store1 != null) {
+ store1.close();
+ }
+ if (store2 != null) {
+ store2.close();
+ }
+ if (catalog != null) {
+ catalog.close();
+ }
+ if (env != null) {
+ env.close();
+ }
+ } catch (Exception e) {
+ System.out.println("Ignored exception during tearDown: " + e);
+ } finally {
+ /* Ensure that GC can cleanup. */
+ index1 = null;
+ index2 = null;
+ store1 = null;
+ store2 = null;
+ catalog = null;
+ env = null;
+ testEnv = null;
+ runner = null;
+ factory = null;
+ storeMap1 = null;
+ storeMap2 = null;
+ indexMap1 = null;
+ indexMap2 = null;
+ }
+ }
+
+ @Override
+ public void runTest()
+ throws Exception {
+
+ runner.run(this);
+ }
+
+ public void doWork() {
+ createViews();
+ writeAndRead();
+ }
+
+ private void createDatabase()
+ throws Exception {
+
+ catalog = new StoredClassCatalog(openDb("catalog.db"));
+ factory = new TupleSerialFactory(catalog);
+ assertSame(catalog, factory.getCatalog());
+
+ store1 = openDb("store1.db");
+ store2 = openDb("store2.db");
+ index1 = openSecondaryDb(factory, "1", store1, "index1.db", null);
+ index2 = openSecondaryDb(factory, "2", store2, "index2.db", store1);
+ }
+
+ private Database openDb(String file)
+ throws Exception {
+
+ DatabaseConfig config = new DatabaseConfig();
+ config.setTransactional(testEnv.isTxnMode());
+ config.setAllowCreate(true);
+ DbCompat.setTypeBtree(config);
+
+ return DbCompat.testOpenDatabase(env, null, file, null, config);
+ }
+
+ private SecondaryDatabase openSecondaryDb(TupleSerialFactory factory,
+ String keyName,
+ Database primary,
+ String file,
+ Database foreignStore)
+ throws Exception {
+
+ SecondaryConfig secConfig = new SecondaryConfig();
+ secConfig.setTransactional(testEnv.isTxnMode());
+ secConfig.setAllowCreate(true);
+ DbCompat.setTypeBtree(secConfig);
+ secConfig.setKeyCreator(factory.getKeyCreator(MarshalledObject.class,
+ keyName));
+ if (foreignStore != null) {
+ secConfig.setForeignKeyDatabase(foreignStore);
+ secConfig.setForeignKeyDeleteAction(
+ ForeignKeyDeleteAction.CASCADE);
+ }
+
+ return DbCompat.testOpenSecondaryDatabase
+ (env, null, file, null, primary, secConfig);
+ }
+
+ private void createViews() {
+ if (isSorted) {
+ storeMap1 = factory.newSortedMap(store1, String.class,
+ MarshalledObject.class, true);
+ storeMap2 = factory.newSortedMap(store2, String.class,
+ MarshalledObject.class, true);
+ indexMap1 = factory.newSortedMap(index1, String.class,
+ MarshalledObject.class, true);
+ indexMap2 = factory.newSortedMap(index2, String.class,
+ MarshalledObject.class, true);
+ } else {
+ storeMap1 = factory.newMap(store1, String.class,
+ MarshalledObject.class, true);
+ storeMap2 = factory.newMap(store2, String.class,
+ MarshalledObject.class, true);
+ indexMap1 = factory.newMap(index1, String.class,
+ MarshalledObject.class, true);
+ indexMap2 = factory.newMap(index2, String.class,
+ MarshalledObject.class, true);
+ }
+ }
+
+ private void writeAndRead() {
+ MarshalledObject o1 = new MarshalledObject("data1", "pk1", "ik1", "");
+ assertNull(storeMap1.put(null, o1));
+
+ assertEquals(o1, storeMap1.get("pk1"));
+ assertEquals(o1, indexMap1.get("ik1"));
+
+ MarshalledObject o2 = new MarshalledObject("data2", "pk2", "", "pk1");
+ assertNull(storeMap2.put(null, o2));
+
+ assertEquals(o2, storeMap2.get("pk2"));
+ assertEquals(o2, indexMap2.get("pk1"));
+
+ /*
+ * store1 contains o1 with primary key "pk1" and index key "ik1"
+ * store2 contains o2 with primary key "pk2" and foreign key "pk1"
+ * which is the primary key of store1
+ */
+
+ storeMap1.remove("pk1");
+ assertNull(storeMap1.get("pk1"));
+ assertNull(indexMap1.get("ik1"));
+ assertNull(storeMap2.get("pk2"));
+ assertNull(indexMap2.get("pk1"));
+ }
+}
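
The cascading delete verified at the end of writeAndRead() above follows directly from the foreign-key configuration in openSecondaryDb(). The sketch below isolates just that step; variable names are illustrative, and factory, env, store1, and store2 are assumed to be open as in the test.

    // Hedged sketch: a secondary index whose foreign-key deletes cascade, as above.
    SecondaryConfig secConfig = new SecondaryConfig();
    secConfig.setAllowCreate(true);
    DbCompat.setTypeBtree(secConfig);
    secConfig.setKeyCreator(factory.getKeyCreator(MarshalledObject.class, "2"));
    secConfig.setForeignKeyDatabase(store1);                              // store1 owns the referenced keys
    secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);  // deleting "pk1" removes dependents
    SecondaryDatabase index = DbCompat.testOpenSecondaryDatabase(
        env, null, "sketch-index2.db", null, store2, secConfig);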
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/db/util/DualTestCase.java b/db-4.8.30/test/scr024/src/com/sleepycat/db/util/DualTestCase.java
new file mode 100644
index 0000000..1fbde28
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/db/util/DualTestCase.java
@@ -0,0 +1,88 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002,2009 Oracle. All rights reserved.
+ *
+ * $Id: DualTestCase.java,v 1.6 2009/01/13 10:41:22 cwl Exp $
+ */
+
+package com.sleepycat.db.util;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+
+public class DualTestCase extends TestCase {
+
+ private Environment env;
+ private boolean setUpInvoked = false;
+
+ public DualTestCase() {
+ super();
+ }
+
+ protected DualTestCase(String name) {
+ super(name);
+ }
+
+ @Override
+ protected void setUp()
+ throws Exception {
+
+ setUpInvoked = true;
+ super.setUp();
+ }
+
+ @Override
+ protected void tearDown()
+ throws Exception {
+
+ if (!setUpInvoked) {
+ throw new IllegalStateException
+ ("tearDown was invoked without a corresponding setUp() call");
+ }
+ destroy();
+ super.tearDown();
+ }
+
+ protected Environment create(File envHome, EnvironmentConfig envConfig)
+ throws DatabaseException {
+
+ try {
+ env = new Environment(envHome, envConfig);
+ } catch (FileNotFoundException e) {
+ throw new RuntimeException(e);
+ }
+ return env;
+ }
+
+ protected void close(Environment environment)
+ throws DatabaseException {
+
+ env.close();
+ env = null;
+ }
+
+ protected void destroy()
+ throws Exception {
+
+ if (env != null) {
+ try {
+ /* Close in case we hit an exception and didn't close */
+ env.close();
+ } catch (RuntimeException e) {
+ /* OK if already closed */
+ }
+ env = null;
+ }
+ }
+
+ public static boolean isReplicatedTest(Class<?> testCaseClass) {
+ return false;
+ }
+}
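
DualTestCase above is a small harness: subclasses obtain an Environment through create() and hand it back through close(), and tearDown() destroys the environment even if a test fails. A hedged sketch of a subclass follows; the class name and environment directory are illustrative.

    // Hedged sketch: using the DualTestCase create()/close() harness above.
    public class ExampleDualTest extends DualTestCase {
        public void testEnvironmentRoundTrip() throws Exception {
            EnvironmentConfig config = new EnvironmentConfig();
            config.setAllowCreate(true);
            Environment env = create(new File("example-env-home"), config);
            // ... exercise the environment ...
            close(env);
        }
    }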
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/BindingTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/BindingTest.java
new file mode 100644
index 0000000..504014e
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/BindingTest.java
@@ -0,0 +1,2425 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.lang.reflect.Array;
+import java.lang.reflect.Field;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseEntry;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+import com.sleepycat.db.ForeignMultiKeyNullifier;
+import com.sleepycat.db.SecondaryKeyCreator;
+import com.sleepycat.db.SecondaryMultiKeyCreator;
+import com.sleepycat.persist.impl.PersistCatalog;
+import com.sleepycat.persist.impl.PersistComparator;
+import com.sleepycat.persist.impl.PersistEntityBinding;
+import com.sleepycat.persist.impl.PersistKeyBinding;
+import com.sleepycat.persist.impl.PersistKeyCreator;
+import com.sleepycat.persist.impl.SimpleCatalog;
+import com.sleepycat.persist.model.AnnotationModel;
+import com.sleepycat.persist.model.ClassMetadata;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.EntityMetadata;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PersistentProxy;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.PrimaryKeyMetadata;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.persist.model.SecondaryKeyMetadata;
+import com.sleepycat.persist.raw.RawField;
+import com.sleepycat.persist.raw.RawObject;
+import com.sleepycat.persist.raw.RawType;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class BindingTest extends TestCase {
+
+ private static final String STORE_PREFIX = "persist#foo#";
+
+ private File envHome;
+ private Environment env;
+ private EntityModel model;
+ private PersistCatalog catalog;
+ private DatabaseEntry keyEntry;
+ private DatabaseEntry dataEntry;
+
+ @Override
+ public void setUp() {
+ envHome = new File(System.getProperty(SharedTestUtils.DEST_DIR));
+ SharedTestUtils.emptyDir(envHome);
+ keyEntry = new DatabaseEntry();
+ dataEntry = new DatabaseEntry();
+ }
+
+ @Override
+ public void tearDown() {
+ if (env != null) {
+ try {
+ env.close();
+ } catch (Exception e) {
+ System.out.println("During tearDown: " + e);
+ }
+ }
+ envHome = null;
+ env = null;
+ catalog = null;
+ keyEntry = null;
+ dataEntry = null;
+ }
+
+ /**
+ * @throws FileNotFoundException from DB core.
+ */
+ private void open()
+ throws FileNotFoundException, DatabaseException {
+
+ EnvironmentConfig envConfig = TestEnv.BDB.getConfig();
+ envConfig.setAllowCreate(true);
+ env = new Environment(envHome, envConfig);
+ openCatalog();
+ }
+
+ private void openCatalog()
+ throws DatabaseException {
+
+ model = new AnnotationModel();
+ model.registerClass(LocalizedTextProxy.class);
+ model.registerClass(LocaleProxy.class);
+
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ dbConfig.setAllowCreate(true);
+ DbCompat.setTypeBtree(dbConfig);
+ catalog = new PersistCatalog
+ (null, env, STORE_PREFIX, STORE_PREFIX + "catalog", dbConfig,
+ model, null, false /*rawAccess*/, null /*Store*/);
+ }
+
+ private void close()
+ throws DatabaseException {
+
+ /* Close/open/close catalog to test checks for class evolution. */
+ catalog.close();
+ PersistCatalog.expectNoClassChanges = true;
+ try {
+ openCatalog();
+ } finally {
+ PersistCatalog.expectNoClassChanges = false;
+ }
+ catalog.close();
+ catalog = null;
+
+ env.close();
+ env = null;
+ }
+
+ public void testBasic()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ checkEntity(Basic.class,
+ new Basic(1, "one", 2.2, "three"));
+ checkEntity(Basic.class,
+ new Basic(0, null, 0, null));
+ checkEntity(Basic.class,
+ new Basic(-1, "xxx", -2, "xxx"));
+
+ checkMetadata(Basic.class.getName(), new String[][] {
+ {"id", "long"},
+ {"one", "java.lang.String"},
+ {"two", "double"},
+ {"three", "java.lang.String"},
+ },
+ 0 /*priKeyIndex*/, null);
+
+ close();
+ }
+
+ @Entity
+ static class Basic implements MyEntity {
+
+ @PrimaryKey
+ private long id;
+ private String one;
+ private double two;
+ private String three;
+
+ private Basic() { }
+
+ private Basic(long id, String one, double two, String three) {
+ this.id = id;
+ this.one = one;
+ this.two = two;
+ this.three = three;
+ }
+
+ public String getBasicOne() {
+ return one;
+ }
+
+ public Object getPriKeyObject() {
+ return id;
+ }
+
+ public void validate(Object other) {
+ Basic o = (Basic) other;
+ TestCase.assertEquals(id, o.id);
+ TestCase.assertTrue(nullOrEqual(one, o.one));
+ TestCase.assertEquals(two, o.two);
+ TestCase.assertTrue(nullOrEqual(three, o.three));
+ if (one == three) {
+ TestCase.assertSame(o.one, o.three);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "" + id + ' ' + one + ' ' + two;
+ }
+ }
+
+ public void testSimpleTypes()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ checkEntity(SimpleTypes.class, new SimpleTypes());
+
+ checkMetadata(SimpleTypes.class.getName(), new String[][] {
+ {"f0", "boolean"},
+ {"f1", "char"},
+ {"f2", "byte"},
+ {"f3", "short"},
+ {"f4", "int"},
+ {"f5", "long"},
+ {"f6", "float"},
+ {"f7", "double"},
+ {"f8", "java.lang.String"},
+ {"f9", "java.math.BigInteger"},
+ //{"f10", "java.math.BigDecimal"},
+ {"f11", "java.util.Date"},
+ {"f12", "java.lang.Boolean"},
+ {"f13", "java.lang.Character"},
+ {"f14", "java.lang.Byte"},
+ {"f15", "java.lang.Short"},
+ {"f16", "java.lang.Integer"},
+ {"f17", "java.lang.Long"},
+ {"f18", "java.lang.Float"},
+ {"f19", "java.lang.Double"},
+ },
+ 0 /*priKeyIndex*/, null);
+
+ close();
+ }
+
+ @Entity
+ static class SimpleTypes implements MyEntity {
+
+ @PrimaryKey
+ private final boolean f0 = true;
+ private final char f1 = 'a';
+ private final byte f2 = 123;
+ private final short f3 = 123;
+ private final int f4 = 123;
+ private final long f5 = 123;
+ private final float f6 = 123.4f;
+ private final double f7 = 123.4;
+ private final String f8 = "xxx";
+ private final BigInteger f9 = BigInteger.valueOf(123);
+ //private BigDecimal f10 = BigDecimal.valueOf(123.4);
+ private final Date f11 = new Date();
+ private final Boolean f12 = true;
+ private final Character f13 = 'a';
+ private final Byte f14 = 123;
+ private final Short f15 = 123;
+ private final Integer f16 = 123;
+ private final Long f17 = 123L;
+ private final Float f18 = 123.4f;
+ private final Double f19 = 123.4;
+
+ SimpleTypes() { }
+
+ public Object getPriKeyObject() {
+ return f0;
+ }
+
+ public void validate(Object other) {
+ SimpleTypes o = (SimpleTypes) other;
+ TestCase.assertEquals(f0, o.f0);
+ TestCase.assertEquals(f1, o.f1);
+ TestCase.assertEquals(f2, o.f2);
+ TestCase.assertEquals(f3, o.f3);
+ TestCase.assertEquals(f4, o.f4);
+ TestCase.assertEquals(f5, o.f5);
+ TestCase.assertEquals(f6, o.f6);
+ TestCase.assertEquals(f7, o.f7);
+ TestCase.assertEquals(f8, o.f8);
+ TestCase.assertEquals(f9, o.f9);
+ //TestCase.assertEquals(f10, o.f10);
+ TestCase.assertEquals(f11, o.f11);
+ TestCase.assertEquals(f12, o.f12);
+ TestCase.assertEquals(f13, o.f13);
+ TestCase.assertEquals(f14, o.f14);
+ TestCase.assertEquals(f15, o.f15);
+ TestCase.assertEquals(f16, o.f16);
+ TestCase.assertEquals(f17, o.f17);
+ TestCase.assertEquals(f18, o.f18);
+ TestCase.assertEquals(f19, o.f19);
+ }
+ }
+
+ public void testArrayTypes()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ checkEntity(ArrayTypes.class, new ArrayTypes());
+
+ checkMetadata(ArrayTypes.class.getName(), new String[][] {
+ {"id", "int"},
+ {"f0", boolean[].class.getName()},
+ {"f1", char[].class.getName()},
+ {"f2", byte[].class.getName()},
+ {"f3", short[].class.getName()},
+ {"f4", int[].class.getName()},
+ {"f5", long[].class.getName()},
+ {"f6", float[].class.getName()},
+ {"f7", double[].class.getName()},
+ {"f8", String[].class.getName()},
+ {"f9", Address[].class.getName()},
+ {"f10", boolean[][][].class.getName()},
+ {"f11", String[][][].class.getName()},
+ },
+ 0 /*priKeyIndex*/, null);
+
+ close();
+ }
+
+ @Entity
+ static class ArrayTypes implements MyEntity {
+
+ @PrimaryKey
+ private final int id = 1;
+ private final boolean[] f0 = {false, true};
+ private final char[] f1 = {'a', 'b'};
+ private final byte[] f2 = {1, 2};
+ private final short[] f3 = {1, 2};
+ private final int[] f4 = {1, 2};
+ private final long[] f5 = {1, 2};
+ private final float[] f6 = {1.1f, 2.2f};
+        private final double[] f7 = {1.1, 2.2};
+ private final String[] f8 = {"xxx", null, "yyy"};
+ private final Address[] f9 = {new Address("city", "state", 123),
+ null,
+ new Address("x", "y", 444)};
+ private final boolean[][][] f10 =
+ {
+ {
+ {false, true},
+ {false, true},
+ },
+ null,
+ {
+ {false, true},
+ {false, true},
+ },
+ };
+ private final String[][][] f11 =
+ {
+ {
+ {"xxx", null, "yyy"},
+ null,
+ {"xxx", null, "yyy"},
+ },
+ null,
+ {
+ {"xxx", null, "yyy"},
+ null,
+ {"xxx", null, "yyy"},
+ },
+ };
+
+ ArrayTypes() { }
+
+ public Object getPriKeyObject() {
+ return id;
+ }
+
+ public void validate(Object other) {
+ ArrayTypes o = (ArrayTypes) other;
+ TestCase.assertEquals(id, o.id);
+ TestCase.assertTrue(Arrays.equals(f0, o.f0));
+ TestCase.assertTrue(Arrays.equals(f1, o.f1));
+ TestCase.assertTrue(Arrays.equals(f2, o.f2));
+ TestCase.assertTrue(Arrays.equals(f3, o.f3));
+ TestCase.assertTrue(Arrays.equals(f4, o.f4));
+ TestCase.assertTrue(Arrays.equals(f5, o.f5));
+ TestCase.assertTrue(Arrays.equals(f6, o.f6));
+ TestCase.assertTrue(Arrays.equals(f7, o.f7));
+ TestCase.assertTrue(Arrays.equals(f8, o.f8));
+ TestCase.assertTrue(Arrays.deepEquals(f9, o.f9));
+ TestCase.assertTrue(Arrays.deepEquals(f10, o.f10));
+ TestCase.assertTrue(Arrays.deepEquals(f11, o.f11));
+ }
+ }
+
+ public void testEnumTypes()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ checkEntity(EnumTypes.class, new EnumTypes());
+
+ checkMetadata(EnumTypes.class.getName(), new String[][] {
+ {"f0", "int"},
+ {"f1", Thread.State.class.getName()},
+ {"f2", MyEnum.class.getName()},
+ {"f3", Object.class.getName()},
+ },
+ 0 /*priKeyIndex*/, null);
+
+ close();
+ }
+
+ enum MyEnum { ONE, TWO }
+
+ @Entity
+ static class EnumTypes implements MyEntity {
+
+ @PrimaryKey
+ private final int f0 = 1;
+ private final Thread.State f1 = Thread.State.RUNNABLE;
+ private final MyEnum f2 = MyEnum.ONE;
+ private final Object f3 = MyEnum.TWO;
+
+ EnumTypes() { }
+
+ public Object getPriKeyObject() {
+ return f0;
+ }
+
+ public void validate(Object other) {
+ EnumTypes o = (EnumTypes) other;
+ TestCase.assertEquals(f0, o.f0);
+ TestCase.assertSame(f1, o.f1);
+ TestCase.assertSame(f2, o.f2);
+ TestCase.assertSame(f3, o.f3);
+ }
+ }
+
+ public void testProxyTypes()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ checkEntity(ProxyTypes.class, new ProxyTypes());
+
+ checkMetadata(ProxyTypes.class.getName(), new String[][] {
+ {"f0", "int"},
+ {"f1", Locale.class.getName()},
+ {"f2", Set.class.getName()},
+ {"f3", Set.class.getName()},
+ {"f4", Object.class.getName()},
+ {"f5", HashMap.class.getName()},
+ {"f6", TreeMap.class.getName()},
+ {"f7", List.class.getName()},
+ {"f8", LinkedList.class.getName()},
+ {"f9", LocalizedText.class.getName()},
+ },
+ 0 /*priKeyIndex*/, null);
+
+ close();
+ }
+
+ @Entity
+ static class ProxyTypes implements MyEntity {
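+ /* All non-key fields here are stored via proxies: the built-in proxies for the java.util collection types, and the LocaleProxy / LocalizedTextProxy classes defined below for Locale and LocalizedText. */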
+
+ @PrimaryKey
+ private final int f0 = 1;
+ private final Locale f1 = Locale.getDefault();
+ private final Set<Integer> f2 = new HashSet<Integer>();
+ private final Set<Integer> f3 = new TreeSet<Integer>();
+ private final Object f4 = new HashSet<Address>();
+ private final HashMap<String,Integer> f5 =
+ new HashMap<String,Integer>();
+ private final TreeMap<String,Address> f6 =
+ new TreeMap<String,Address>();
+ private final List<Integer> f7 = new ArrayList<Integer>();
+ private final LinkedList<Integer> f8 = new LinkedList<Integer>();
+ private final LocalizedText f9 = new LocalizedText(f1, "xyz");
+
+ ProxyTypes() {
+ f2.add(123);
+ f2.add(456);
+ f3.add(456);
+ f3.add(123);
+ HashSet<Address> s = (HashSet) f4;
+ s.add(new Address("city", "state", 11111));
+ s.add(new Address("city2", "state2", 22222));
+ s.add(new Address("city3", "state3", 33333));
+ f5.put("one", 111);
+ f5.put("two", 222);
+ f5.put("three", 333);
+ f6.put("one", new Address("city", "state", 11111));
+ f6.put("two", new Address("city2", "state2", 22222));
+ f6.put("three", new Address("city3", "state3", 33333));
+ f7.add(123);
+ f7.add(456);
+ f8.add(123);
+ f8.add(456);
+ }
+
+ public Object getPriKeyObject() {
+ return f0;
+ }
+
+ public void validate(Object other) {
+ ProxyTypes o = (ProxyTypes) other;
+ TestCase.assertEquals(f0, o.f0);
+ TestCase.assertEquals(f1, o.f1);
+ TestCase.assertEquals(f2, o.f2);
+ TestCase.assertEquals(f3, o.f3);
+ TestCase.assertEquals(f4, o.f4);
+ TestCase.assertEquals(f5, o.f5);
+ TestCase.assertEquals(f6, o.f6);
+ TestCase.assertEquals(f7, o.f7);
+ TestCase.assertEquals(f8, o.f8);
+ TestCase.assertEquals(f9, o.f9);
+ }
+ }
+
+ @Persistent(proxyFor=Locale.class)
+ static class LocaleProxy implements PersistentProxy<Locale> {
+
+ String language;
+ String country;
+ String variant;
+
+ private LocaleProxy() {}
+
+ public void initializeProxy(Locale object) {
+ language = object.getLanguage();
+ country = object.getCountry();
+ variant = object.getVariant();
+ }
+
+ public Locale convertProxy() {
+ return new Locale(language, country, variant);
+ }
+ }
+
+ static class LocalizedText {
+
+ Locale locale;
+ String text;
+
+ LocalizedText(Locale locale, String text) {
+ this.locale = locale;
+ this.text = text;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ LocalizedText o = (LocalizedText) other;
+ return text.equals(o.text) &&
+ locale.equals(o.locale);
+ }
+ }
+
+ @Persistent(proxyFor=LocalizedText.class)
+ static class LocalizedTextProxy implements PersistentProxy<LocalizedText> {
+
+ Locale locale;
+ String text;
+
+ private LocalizedTextProxy() {}
+
+ public void initializeProxy(LocalizedText object) {
+ locale = object.locale;
+ text = object.text;
+ }
+
+ public LocalizedText convertProxy() {
+ return new LocalizedText(locale, text);
+ }
+ }
+
+ public void testEmbedded()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ Address a1 = new Address("city", "state", 123);
+ Address a2 = new Address("Wikieup", "AZ", 85360);
+
+ checkEntity(Embedded.class,
+ new Embedded("x", a1, a2));
+ checkEntity(Embedded.class,
+ new Embedded("y", a1, null));
+ checkEntity(Embedded.class,
+ new Embedded("", a2, a2));
+
+ checkMetadata(Embedded.class.getName(), new String[][] {
+ {"id", "java.lang.String"},
+ {"idShadow", "java.lang.String"},
+ {"one", Address.class.getName()},
+ {"two", Address.class.getName()},
+ },
+ 0 /*priKeyIndex*/, null);
+
+ checkMetadata(Address.class.getName(), new String[][] {
+ {"street", "java.lang.String"},
+ {"city", "java.lang.String"},
+ {"zip", "int"},
+ },
+ -1 /*priKeyIndex*/, null);
+
+ close();
+ }
+
+ @Entity
+ static class Embedded implements MyEntity {
+
+ @PrimaryKey
+ private String id;
+ private String idShadow;
+ private Address one;
+ private Address two;
+
+ private Embedded() { }
+
+ private Embedded(String id, Address one, Address two) {
+ this.id = id;
+ idShadow = id;
+ this.one = one;
+ this.two = two;
+ }
+
+ public Object getPriKeyObject() {
+ return id;
+ }
+
+ public void validate(Object other) {
+ Embedded o = (Embedded) other;
+ TestCase.assertEquals(id, o.id);
+ if (one != null) {
+ one.validate(o.one);
+ } else {
+ assertNull(o.one);
+ }
+ if (two != null) {
+ two.validate(o.two);
+ } else {
+ assertNull(o.two);
+ }
+ TestCase.assertSame(o.id, o.idShadow);
+ if (one == two) {
+ TestCase.assertSame(o.one, o.two);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "" + id + ' ' + one + ' ' + two;
+ }
+ }
+
+ @Persistent
+ static class Address {
+
+ private String street;
+ private String city;
+ private int zip;
+
+ private Address() {}
+
+ Address(String street, String city, int zip) {
+ this.street = street;
+ this.city = city;
+ this.zip = zip;
+ }
+
+ void validate(Address o) {
+ TestCase.assertTrue(nullOrEqual(street, o.street));
+ TestCase.assertTrue(nullOrEqual(city, o.city));
+ TestCase.assertEquals(zip, o.zip);
+ }
+
+ @Override
+ public String toString() {
+ return "" + street + ' ' + city + ' ' + zip;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null) {
+ return false;
+ }
+ Address o = (Address) other;
+ return nullOrEqual(street, o.street) &&
+ nullOrEqual(city, o.city) &&
+ nullOrEqual(zip, o.zip);
+ }
+
+ @Override
+ public int hashCode() {
+ return zip;
+ }
+ }
+
+ public void testSubclass()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ checkEntity(Basic.class,
+ new Subclass(-1, "xxx", -2, "xxx", "xxx", true));
+
+ checkMetadata(Basic.class.getName(), new String[][] {
+ {"id", "long"},
+ {"one", "java.lang.String"},
+ {"two", "double"},
+ {"three", "java.lang.String"},
+ },
+ 0 /*priKeyIndex*/, null);
+ checkMetadata(Subclass.class.getName(), new String[][] {
+ {"one", "java.lang.String"},
+ {"two", "boolean"},
+ },
+ -1 /*priKeyIndex*/, Basic.class.getName());
+
+ close();
+ }
+
+ @Persistent
+ static class Subclass extends Basic {
+
+ private String one;
+ private boolean two;
+
+ private Subclass() {
+ }
+
+ private Subclass(long id, String one, double two, String three,
+ String subOne, boolean subTwo) {
+ super(id, one, two, three);
+ this.one = subOne;
+ this.two = subTwo;
+ }
+
+ @Override
+ public void validate(Object other) {
+ super.validate(other);
+ Subclass o = (Subclass) other;
+ TestCase.assertTrue(nullOrEqual(one, o.one));
+ TestCase.assertEquals(two, o.two);
+ if (one == getBasicOne()) {
+ TestCase.assertSame(o.one, o.getBasicOne());
+ }
+ }
+ }
+
+ public void testSuperclass()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ checkEntity(UseSuperclass.class,
+ new UseSuperclass(33, "xxx"));
+
+ checkMetadata(Superclass.class.getName(), new String[][] {
+ {"id", "int"},
+ {"one", "java.lang.String"},
+ },
+ 0 /*priKeyIndex*/, null);
+ checkMetadata(UseSuperclass.class.getName(), new String[][] {
+ },
+ -1 /*priKeyIndex*/, Superclass.class.getName());
+
+ close();
+ }
+
+ @Persistent
+ static class Superclass implements MyEntity {
+
+ @PrimaryKey
+ private int id;
+ private String one;
+
+ private Superclass() { }
+
+ private Superclass(int id, String one) {
+ this.id = id;
+ this.one = one;
+ }
+
+ public Object getPriKeyObject() {
+ return id;
+ }
+
+ public void validate(Object other) {
+ Superclass o = (Superclass) other;
+ TestCase.assertEquals(id, o.id);
+ TestCase.assertTrue(nullOrEqual(one, o.one));
+ }
+ }
+
+ @Entity
+ static class UseSuperclass extends Superclass {
+
+ private UseSuperclass() { }
+
+ private UseSuperclass(int id, String one) {
+ super(id, one);
+ }
+ }
+
+ public void testAbstract()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ checkEntity(EntityUseAbstract.class,
+ new EntityUseAbstract(33, "xxx"));
+
+ checkMetadata(Abstract.class.getName(), new String[][] {
+ {"one", "java.lang.String"},
+ },
+ -1 /*priKeyIndex*/, null);
+ checkMetadata(EmbeddedUseAbstract.class.getName(), new String[][] {
+ {"two", "java.lang.String"},
+ },
+ -1 /*priKeyIndex*/, Abstract.class.getName());
+ checkMetadata(EntityUseAbstract.class.getName(), new String[][] {
+ {"id", "int"},
+ {"f1", EmbeddedUseAbstract.class.getName()},
+ {"f2", Abstract.class.getName()},
+ {"f3", Object.class.getName()},
+ {"f4", Interface.class.getName()},
+ {"a1", EmbeddedUseAbstract[].class.getName()},
+ {"a2", Abstract[].class.getName()},
+ {"a3", Abstract[].class.getName()},
+ {"a4", Object[].class.getName()},
+ {"a5", Interface[].class.getName()},
+ {"a6", Interface[].class.getName()},
+ {"a7", Interface[].class.getName()},
+ },
+ 0 /*priKeyIndex*/, Abstract.class.getName());
+
+ close();
+ }
+
+ @Persistent
+ static abstract class Abstract implements Interface {
+
+ String one;
+
+ private Abstract() { }
+
+ private Abstract(String one) {
+ this.one = one;
+ }
+
+ public void validate(Object other) {
+ Abstract o = (Abstract) other;
+ TestCase.assertTrue(nullOrEqual(one, o.one));
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ Abstract o = (Abstract) other;
+ return nullOrEqual(one, o.one);
+ }
+ }
+
+ interface Interface {
+ void validate(Object other);
+ }
+
+ @Persistent
+ static class EmbeddedUseAbstract extends Abstract {
+
+ private String two;
+
+ private EmbeddedUseAbstract() { }
+
+ private EmbeddedUseAbstract(String one, String two) {
+ super(one);
+ this.two = two;
+ }
+
+ @Override
+ public void validate(Object other) {
+ super.validate(other);
+ EmbeddedUseAbstract o = (EmbeddedUseAbstract) other;
+ TestCase.assertTrue(nullOrEqual(two, o.two));
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (!super.equals(other)) {
+ return false;
+ }
+ EmbeddedUseAbstract o = (EmbeddedUseAbstract) other;
+ return nullOrEqual(two, o.two);
+ }
+ }
+
+ @Entity
+ static class EntityUseAbstract extends Abstract implements MyEntity {
+
+ @PrimaryKey
+ private int id;
+
+ private EmbeddedUseAbstract f1;
+ private Abstract f2;
+ private Object f3;
+ private Interface f4;
+ private EmbeddedUseAbstract[] a1;
+ private Abstract[] a2;
+ private Abstract[] a3;
+ private Object[] a4;
+ private Interface[] a5;
+ private Interface[] a6;
+ private Interface[] a7;
+
+ private EntityUseAbstract() { }
+
+ private EntityUseAbstract(int id, String one) {
+ super(one);
+ this.id = id;
+ f1 = new EmbeddedUseAbstract(one, one);
+ f2 = new EmbeddedUseAbstract(one + "x", one + "y");
+ f3 = new EmbeddedUseAbstract(null, null);
+ f4 = new EmbeddedUseAbstract(null, null);
+ a1 = new EmbeddedUseAbstract[3];
+ a2 = new EmbeddedUseAbstract[3];
+ a3 = new Abstract[3];
+ a4 = new Object[3];
+ a5 = new EmbeddedUseAbstract[3];
+ a6 = new Abstract[3];
+ a7 = new Interface[3];
+ for (int i = 0; i < 3; i += 1) {
+ a1[i] = new EmbeddedUseAbstract("1" + i, null);
+ a2[i] = new EmbeddedUseAbstract("2" + i, null);
+ a3[i] = new EmbeddedUseAbstract("3" + i, null);
+ a4[i] = new EmbeddedUseAbstract("4" + i, null);
+ a5[i] = new EmbeddedUseAbstract("5" + i, null);
+ a6[i] = new EmbeddedUseAbstract("6" + i, null);
+ a7[i] = new EmbeddedUseAbstract("7" + i, null);
+ }
+ }
+
+ public Object getPriKeyObject() {
+ return id;
+ }
+
+ @Override
+ public void validate(Object other) {
+ super.validate(other);
+ EntityUseAbstract o = (EntityUseAbstract) other;
+ TestCase.assertEquals(id, o.id);
+ f1.validate(o.f1);
+ assertSame(o.one, o.f1.one);
+ assertSame(o.f1.one, o.f1.two);
+ f2.validate(o.f2);
+ ((Abstract) f3).validate(o.f3);
+ f4.validate(o.f4);
+ assertTrue(arrayToString(a1) + ' ' + arrayToString(o.a1),
+ Arrays.equals(a1, o.a1));
+ assertTrue(Arrays.equals(a2, o.a2));
+ assertTrue(Arrays.equals(a3, o.a3));
+ assertTrue(Arrays.equals(a4, o.a4));
+ assertTrue(Arrays.equals(a5, o.a5));
+ assertTrue(Arrays.equals(a6, o.a6));
+ assertTrue(Arrays.equals(a7, o.a7));
+ assertSame(EmbeddedUseAbstract.class, f2.getClass());
+ assertSame(EmbeddedUseAbstract.class, f3.getClass());
+ assertSame(EmbeddedUseAbstract[].class, a1.getClass());
+ assertSame(EmbeddedUseAbstract[].class, a2.getClass());
+ assertSame(Abstract[].class, a3.getClass());
+ assertSame(Object[].class, a4.getClass());
+ assertSame(EmbeddedUseAbstract[].class, a5.getClass());
+ assertSame(Abstract[].class, a6.getClass());
+ assertSame(Interface[].class, a7.getClass());
+ }
+ }
+
+ public void testCompositeKey()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ CompositeKey key =
+ new CompositeKey(123, 456L, "xyz", BigInteger.valueOf(789),
+ MyEnum.ONE);
+ checkEntity(UseCompositeKey.class,
+ new UseCompositeKey(key, "one"));
+
+ checkMetadata(UseCompositeKey.class.getName(), new String[][] {
+ {"key", CompositeKey.class.getName()},
+ {"one", "java.lang.String"},
+ },
+ 0 /*priKeyIndex*/, null);
+
+ checkMetadata(CompositeKey.class.getName(), new String[][] {
+ {"f1", "int"},
+ {"f2", "java.lang.Long"},
+ {"f3", "java.lang.String"},
+ {"f4", "java.math.BigInteger"},
+ {"f5", MyEnum.class.getName()},
+ },
+ -1 /*priKeyIndex*/, null);
+
+ close();
+ }
+
+ @Persistent
+ static class CompositeKey {
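+ /* The @KeyField values, not the declaration order, define the key's field order: f3, f2, f1, f4, f5. */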
+ @KeyField(3)
+ private int f1;
+ @KeyField(2)
+ private Long f2;
+ @KeyField(1)
+ private String f3;
+ @KeyField(4)
+ private BigInteger f4;
+ @KeyField(5)
+ private MyEnum f5;
+
+ private CompositeKey() {}
+
+ CompositeKey(int f1, Long f2, String f3, BigInteger f4, MyEnum f5) {
+ this.f1 = f1;
+ this.f2 = f2;
+ this.f3 = f3;
+ this.f4 = f4;
+ this.f5 = f5;
+ }
+
+ void validate(CompositeKey o) {
+ TestCase.assertEquals(f1, o.f1);
+ TestCase.assertTrue(nullOrEqual(f2, o.f2));
+ TestCase.assertTrue(nullOrEqual(f3, o.f3));
+ TestCase.assertTrue(nullOrEqual(f4, o.f4));
+ TestCase.assertEquals(f5, o.f5);
+ TestCase.assertTrue(nullOrEqual(f5, o.f5));
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ CompositeKey o = (CompositeKey) other;
+ return f1 == o.f1 &&
+ nullOrEqual(f2, o.f2) &&
+ nullOrEqual(f3, o.f3) &&
+ nullOrEqual(f4, o.f4) &&
+ nullOrEqual(f5, o.f5);
+ }
+
+ @Override
+ public int hashCode() {
+ return f1;
+ }
+
+ @Override
+ public String toString() {
+ return "" + f1 + ' ' + f2 + ' ' + f3 + ' ' + f4 + ' ' + f5;
+ }
+ }
+
+ @Entity
+ static class UseCompositeKey implements MyEntity {
+
+ @PrimaryKey
+ private CompositeKey key;
+ private String one;
+
+ private UseCompositeKey() { }
+
+ private UseCompositeKey(CompositeKey key, String one) {
+ this.key = key;
+ this.one = one;
+ }
+
+ public Object getPriKeyObject() {
+ return key;
+ }
+
+ public void validate(Object other) {
+ UseCompositeKey o = (UseCompositeKey) other;
+ TestCase.assertNotNull(key);
+ TestCase.assertNotNull(o.key);
+ key.validate(o.key);
+ TestCase.assertTrue(nullOrEqual(one, o.one));
+ }
+ }
+
+ public void testComparableKey()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ ComparableKey key = new ComparableKey(123, 456);
+ checkEntity(UseComparableKey.class,
+ new UseComparableKey(key, "one"));
+
+ checkMetadata(UseComparableKey.class.getName(), new String[][] {
+ {"key", ComparableKey.class.getName()},
+ {"one", "java.lang.String"},
+ },
+ 0 /*priKeyIndex*/, null);
+
+ checkMetadata(ComparableKey.class.getName(), new String[][] {
+ {"f1", "int"},
+ {"f2", "int"},
+ },
+ -1 /*priKeyIndex*/, null);
+
+ ClassMetadata classMeta =
+ model.getClassMetadata(UseComparableKey.class.getName());
+ assertNotNull(classMeta);
+
+ PersistKeyBinding binding = new PersistKeyBinding
+ (catalog, ComparableKey.class.getName(), false);
+
+ PersistComparator comparator = new PersistComparator
+ (/*ComparableKey.class.getName(),
+ classMeta.getCompositeKeyFields(),*/
+ binding);
+
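+ /* Expected results follow ComparableKey.compareTo, which sorts both fields in reverse integer order. */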
+ compareKeys(comparator, binding, new ComparableKey(1, 1),
+ new ComparableKey(1, 1), 0);
+ compareKeys(comparator, binding, new ComparableKey(1, 2),
+ new ComparableKey(1, 1), -1);
+ compareKeys(comparator, binding, new ComparableKey(2, 1),
+ new ComparableKey(1, 1), -1);
+ compareKeys(comparator, binding, new ComparableKey(2, 1),
+ new ComparableKey(3, 1), 1);
+
+ close();
+ }
+
+ private void compareKeys(Comparator<byte[]> comparator,
+ EntryBinding binding,
+ Object key1,
+ Object key2,
+ int expectResult) {
+ DatabaseEntry entry1 = new DatabaseEntry();
+ DatabaseEntry entry2 = new DatabaseEntry();
+ binding.objectToEntry(key1, entry1);
+ binding.objectToEntry(key2, entry2);
+ int result = comparator.compare(entry1.getData(), entry2.getData());
+ assertEquals(expectResult, result);
+ }
+
+ @Persistent
+ static class ComparableKey implements Comparable<ComparableKey> {
+ @KeyField(2)
+ private int f1;
+ @KeyField(1)
+ private int f2;
+
+ private ComparableKey() {}
+
+ ComparableKey(int f1, int f2) {
+ this.f1 = f1;
+ this.f2 = f2;
+ }
+
+ void validate(ComparableKey o) {
+ TestCase.assertEquals(f1, o.f1);
+ TestCase.assertEquals(f2, o.f2);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ ComparableKey o = (ComparableKey) other;
+ return f1 == o.f1 && f2 == o.f2;
+ }
+
+ @Override
+ public int hashCode() {
+ return f1 + f2;
+ }
+
+ @Override
+ public String toString() {
+ return "" + f1 + ' ' + f2;
+ }
+
+ /** Compare f1 then f2, in reverse integer order. */
+ public int compareTo(ComparableKey o) {
+ if (f1 != o.f1) {
+ return o.f1 - f1;
+ } else {
+ return o.f2 - f2;
+ }
+ }
+ }
+
+ @Entity
+ static class UseComparableKey implements MyEntity {
+
+ @PrimaryKey
+ private ComparableKey key;
+ private String one;
+
+ private UseComparableKey() { }
+
+ private UseComparableKey(ComparableKey key, String one) {
+ this.key = key;
+ this.one = one;
+ }
+
+ public Object getPriKeyObject() {
+ return key;
+ }
+
+ public void validate(Object other) {
+ UseComparableKey o = (UseComparableKey) other;
+ TestCase.assertNotNull(key);
+ TestCase.assertNotNull(o.key);
+ key.validate(o.key);
+ TestCase.assertTrue(nullOrEqual(one, o.one));
+ }
+ }
+
+ public void testSecKeys()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ SecKeys obj = new SecKeys();
+ checkEntity(SecKeys.class, obj);
+
+ checkMetadata(SecKeys.class.getName(), new String[][] {
+ {"id", "long"},
+ {"f0", "boolean"},
+ {"g0", "boolean"},
+ {"f1", "char"},
+ {"g1", "char"},
+ {"f2", "byte"},
+ {"g2", "byte"},
+ {"f3", "short"},
+ {"g3", "short"},
+ {"f4", "int"},
+ {"g4", "int"},
+ {"f5", "long"},
+ {"g5", "long"},
+ {"f6", "float"},
+ {"g6", "float"},
+ {"f7", "double"},
+ {"g7", "double"},
+ {"f8", "java.lang.String"},
+ {"g8", "java.lang.String"},
+ {"f9", "java.math.BigInteger"},
+ {"g9", "java.math.BigInteger"},
+ //{"f10", "java.math.BigDecimal"},
+ //{"g10", "java.math.BigDecimal"},
+ {"f11", "java.util.Date"},
+ {"g11", "java.util.Date"},
+ {"f12", "java.lang.Boolean"},
+ {"g12", "java.lang.Boolean"},
+ {"f13", "java.lang.Character"},
+ {"g13", "java.lang.Character"},
+ {"f14", "java.lang.Byte"},
+ {"g14", "java.lang.Byte"},
+ {"f15", "java.lang.Short"},
+ {"g15", "java.lang.Short"},
+ {"f16", "java.lang.Integer"},
+ {"g16", "java.lang.Integer"},
+ {"f17", "java.lang.Long"},
+ {"g17", "java.lang.Long"},
+ {"f18", "java.lang.Float"},
+ {"g18", "java.lang.Float"},
+ {"f19", "java.lang.Double"},
+ {"g19", "java.lang.Double"},
+ {"f20", CompositeKey.class.getName()},
+ {"g20", CompositeKey.class.getName()},
+ {"f21", int[].class.getName()},
+ {"g21", int[].class.getName()},
+ {"f22", Integer[].class.getName()},
+ {"g22", Integer[].class.getName()},
+ {"f23", Set.class.getName()},
+ {"g23", Set.class.getName()},
+ {"f24", CompositeKey[].class.getName()},
+ {"g24", CompositeKey[].class.getName()},
+ {"f25", Set.class.getName()},
+ {"g25", Set.class.getName()},
+ {"f26", MyEnum.class.getName()},
+ {"g26", MyEnum.class.getName()},
+ {"f27", MyEnum[].class.getName()},
+ {"g27", MyEnum[].class.getName()},
+ {"f28", Set.class.getName()},
+ {"g28", Set.class.getName()},
+ {"f31", "java.util.Date"},
+ {"f32", "java.lang.Boolean"},
+ {"f33", "java.lang.Character"},
+ {"f34", "java.lang.Byte"},
+ {"f35", "java.lang.Short"},
+ {"f36", "java.lang.Integer"},
+ {"f37", "java.lang.Long"},
+ {"f38", "java.lang.Float"},
+ {"f39", "java.lang.Double"},
+ {"f40", CompositeKey.class.getName()},
+ },
+ 0 /*priKeyIndex*/, null);
+
+ checkSecKey(obj, "f0", obj.f0, Boolean.class);
+ checkSecKey(obj, "f1", obj.f1, Character.class);
+ checkSecKey(obj, "f2", obj.f2, Byte.class);
+ checkSecKey(obj, "f3", obj.f3, Short.class);
+ checkSecKey(obj, "f4", obj.f4, Integer.class);
+ checkSecKey(obj, "f5", obj.f5, Long.class);
+ checkSecKey(obj, "f6", obj.f6, Float.class);
+ checkSecKey(obj, "f7", obj.f7, Double.class);
+ checkSecKey(obj, "f8", obj.f8, String.class);
+ checkSecKey(obj, "f9", obj.f9, BigInteger.class);
+ //checkSecKey(obj, "f10", obj.f10, BigDecimal.class);
+ checkSecKey(obj, "f11", obj.f11, Date.class);
+ checkSecKey(obj, "f12", obj.f12, Boolean.class);
+ checkSecKey(obj, "f13", obj.f13, Character.class);
+ checkSecKey(obj, "f14", obj.f14, Byte.class);
+ checkSecKey(obj, "f15", obj.f15, Short.class);
+ checkSecKey(obj, "f16", obj.f16, Integer.class);
+ checkSecKey(obj, "f17", obj.f17, Long.class);
+ checkSecKey(obj, "f18", obj.f18, Float.class);
+ checkSecKey(obj, "f19", obj.f19, Double.class);
+ checkSecKey(obj, "f20", obj.f20, CompositeKey.class);
+ checkSecKey(obj, "f26", obj.f26, MyEnum.class);
+
+ checkSecMultiKey(obj, "f21", toSet(obj.f21), Integer.class);
+ checkSecMultiKey(obj, "f22", toSet(obj.f22), Integer.class);
+ checkSecMultiKey(obj, "f23", toSet(obj.f23), Integer.class);
+ checkSecMultiKey(obj, "f24", toSet(obj.f24), CompositeKey.class);
+ checkSecMultiKey(obj, "f25", toSet(obj.f25), CompositeKey.class);
+ checkSecMultiKey(obj, "f27", toSet(obj.f27), MyEnum.class);
+ checkSecMultiKey(obj, "f28", toSet(obj.f28), MyEnum.class);
+
+ nullifySecKey(obj, "f8", obj.f8, String.class);
+ nullifySecKey(obj, "f9", obj.f9, BigInteger.class);
+ //nullifySecKey(obj, "f10", obj.f10, BigDecimal.class);
+ nullifySecKey(obj, "f11", obj.f11, Date.class);
+ nullifySecKey(obj, "f12", obj.f12, Boolean.class);
+ nullifySecKey(obj, "f13", obj.f13, Character.class);
+ nullifySecKey(obj, "f14", obj.f14, Byte.class);
+ nullifySecKey(obj, "f15", obj.f15, Short.class);
+ nullifySecKey(obj, "f16", obj.f16, Integer.class);
+ nullifySecKey(obj, "f17", obj.f17, Long.class);
+ nullifySecKey(obj, "f18", obj.f18, Float.class);
+ nullifySecKey(obj, "f19", obj.f19, Double.class);
+ nullifySecKey(obj, "f20", obj.f20, CompositeKey.class);
+ nullifySecKey(obj, "f26", obj.f26, MyEnum.class);
+
+ nullifySecMultiKey(obj, "f21", obj.f21, Integer.class);
+ nullifySecMultiKey(obj, "f22", obj.f22, Integer.class);
+ nullifySecMultiKey(obj, "f23", obj.f23, Integer.class);
+ nullifySecMultiKey(obj, "f24", obj.f24, CompositeKey.class);
+ nullifySecMultiKey(obj, "f25", obj.f25, CompositeKey.class);
+ nullifySecMultiKey(obj, "f27", obj.f27, MyEnum.class);
+ nullifySecMultiKey(obj, "f28", obj.f28, MyEnum.class);
+
+ nullifySecKey(obj, "f31", obj.f31, Date.class);
+ nullifySecKey(obj, "f32", obj.f32, Boolean.class);
+ nullifySecKey(obj, "f33", obj.f33, Character.class);
+ nullifySecKey(obj, "f34", obj.f34, Byte.class);
+ nullifySecKey(obj, "f35", obj.f35, Short.class);
+ nullifySecKey(obj, "f36", obj.f36, Integer.class);
+ nullifySecKey(obj, "f37", obj.f37, Long.class);
+ nullifySecKey(obj, "f38", obj.f38, Float.class);
+ nullifySecKey(obj, "f39", obj.f39, Double.class);
+ nullifySecKey(obj, "f40", obj.f40, CompositeKey.class);
+
+ close();
+ }
+
+ static Set toSet(int[] a) {
+ Set set = new HashSet();
+ for (int i : a) {
+ set.add(i);
+ }
+ return set;
+ }
+
+ static Set toSet(Object[] a) {
+ return new HashSet(Arrays.asList(a));
+ }
+
+ static Set toSet(Set s) {
+ return s;
+ }
+
+ @Entity
+ static class SecKeys implements MyEntity {
+
+ @PrimaryKey
+ long id;
+
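+ /* Most f<N> key fields below are paired with a non-indexed g<N> twin holding the same value. */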
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final boolean f0 = false;
+ private final boolean g0 = false;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final char f1 = '1';
+ private final char g1 = '1';
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final byte f2 = 2;
+ private final byte g2 = 2;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final short f3 = 3;
+ private final short g3 = 3;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final int f4 = 4;
+ private final int g4 = 4;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final long f5 = 5;
+ private final long g5 = 5;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final float f6 = 6.6f;
+ private final float g6 = 6.6f;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final double f7 = 7.7;
+ private final double g7 = 7.7;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final String f8 = "8";
+ private final String g8 = "8";
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private BigInteger f9;
+ private BigInteger g9;
+
+ //@SecondaryKey(relate=MANY_TO_ONE)
+ //private BigDecimal f10;
+ //private BigDecimal g10;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Date f11 = new Date(11);
+ private final Date g11 = new Date(11);
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Boolean f12 = true;
+ private final Boolean g12 = true;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Character f13 = '3';
+ private final Character g13 = '3';
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Byte f14 = 14;
+ private final Byte g14 = 14;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Short f15 = 15;
+ private final Short g15 = 15;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Integer f16 = 16;
+ private final Integer g16 = 16;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Long f17 = 17L;
+ private final Long g17 = 17L;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Float f18 = 18.18f;
+ private final Float g18 = 18.18f;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Double f19 = 19.19;
+ private final Double g19 = 19.19;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final CompositeKey f20 =
+ new CompositeKey(20, 20L, "20", BigInteger.valueOf(20),
+ MyEnum.ONE);
+ private final CompositeKey g20 =
+ new CompositeKey(20, 20L, "20", BigInteger.valueOf(20),
+ MyEnum.TWO);
+
+ private static int[] arrayOfInt = { 100, 101, 102 };
+
+ private static Integer[] arrayOfInteger = { 100, 101, 102 };
+
+ private static CompositeKey[] arrayOfCompositeKey = {
+ new CompositeKey(100, 100L, "100", BigInteger.valueOf(100),
+ MyEnum.ONE),
+ new CompositeKey(101, 101L, "101", BigInteger.valueOf(101),
+ MyEnum.TWO),
+ new CompositeKey(102, 102L, "102", BigInteger.valueOf(102),
+ MyEnum.TWO),
+ };
+
+ private static MyEnum[] arrayOfEnum =
+ new MyEnum[] { MyEnum.ONE, MyEnum.TWO };
+
+ @SecondaryKey(relate=ONE_TO_MANY)
+ private final int[] f21 = arrayOfInt;
+ private final int[] g21 = f21;
+
+ @SecondaryKey(relate=ONE_TO_MANY)
+ private final Integer[] f22 = arrayOfInteger;
+ private final Integer[] g22 = f22;
+
+ @SecondaryKey(relate=ONE_TO_MANY)
+ private final Set<Integer> f23 = toSet(arrayOfInteger);
+ private final Set<Integer> g23 = f23;
+
+ @SecondaryKey(relate=ONE_TO_MANY)
+ private final CompositeKey[] f24 = arrayOfCompositeKey;
+ private final CompositeKey[] g24 = f24;
+
+ @SecondaryKey(relate=ONE_TO_MANY)
+ private final Set<CompositeKey> f25 = toSet(arrayOfCompositeKey);
+ private final Set<CompositeKey> g25 = f25;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final MyEnum f26 = MyEnum.TWO;
+ private final MyEnum g26 = f26;
+
+ @SecondaryKey(relate=ONE_TO_MANY)
+ private final MyEnum[] f27 = arrayOfEnum;
+ private final MyEnum[] g27 = f27;
+
+ @SecondaryKey(relate=ONE_TO_MANY)
+ private final Set<MyEnum> f28 = toSet(arrayOfEnum);
+ private final Set<MyEnum> g28 = f28;
+
+ /* Repeated key values to test shared references. */
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Date f31 = f11;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Boolean f32 = f12;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Character f33 = f13;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Byte f34 = f14;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Short f35 = f15;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Integer f36 = f16;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Long f37 = f17;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Float f38 = f18;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final Double f39 = f19;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private final CompositeKey f40 = f20;
+
+ public Object getPriKeyObject() {
+ return id;
+ }
+
+ public void validate(Object other) {
+ SecKeys o = (SecKeys) other;
+ TestCase.assertEquals(id, o.id);
+
+ TestCase.assertEquals(f0, o.f0);
+ TestCase.assertEquals(f1, o.f1);
+ TestCase.assertEquals(f2, o.f2);
+ TestCase.assertEquals(f3, o.f3);
+ TestCase.assertEquals(f4, o.f4);
+ TestCase.assertEquals(f5, o.f5);
+ TestCase.assertEquals(f6, o.f6);
+ TestCase.assertEquals(f7, o.f7);
+ TestCase.assertEquals(f8, o.f8);
+ TestCase.assertEquals(f9, o.f9);
+ //TestCase.assertEquals(f10, o.f10);
+ TestCase.assertEquals(f11, o.f11);
+ TestCase.assertEquals(f12, o.f12);
+ TestCase.assertEquals(f13, o.f13);
+ TestCase.assertEquals(f14, o.f14);
+ TestCase.assertEquals(f15, o.f15);
+ TestCase.assertEquals(f16, o.f16);
+ TestCase.assertEquals(f17, o.f17);
+ TestCase.assertEquals(f18, o.f18);
+ TestCase.assertEquals(f19, o.f19);
+ TestCase.assertEquals(f20, o.f20);
+ TestCase.assertTrue(Arrays.equals(f21, o.f21));
+ TestCase.assertTrue(Arrays.equals(f22, o.f22));
+ TestCase.assertEquals(f23, o.f23);
+ TestCase.assertTrue(Arrays.equals(f24, o.f24));
+ TestCase.assertEquals(f25, o.f25);
+
+ TestCase.assertEquals(g0, o.g0);
+ TestCase.assertEquals(g1, o.g1);
+ TestCase.assertEquals(g2, o.g2);
+ TestCase.assertEquals(g3, o.g3);
+ TestCase.assertEquals(g4, o.g4);
+ TestCase.assertEquals(g5, o.g5);
+ TestCase.assertEquals(g6, o.g6);
+ TestCase.assertEquals(g7, o.g7);
+ TestCase.assertEquals(g8, o.g8);
+ TestCase.assertEquals(g9, o.g9);
+ //TestCase.assertEquals(g10, o.g10);
+ TestCase.assertEquals(g11, o.g11);
+ TestCase.assertEquals(g12, o.g12);
+ TestCase.assertEquals(g13, o.g13);
+ TestCase.assertEquals(g14, o.g14);
+ TestCase.assertEquals(g15, o.g15);
+ TestCase.assertEquals(g16, o.g16);
+ TestCase.assertEquals(g17, o.g17);
+ TestCase.assertEquals(g18, o.g18);
+ TestCase.assertEquals(g19, o.g19);
+ TestCase.assertEquals(g20, o.g20);
+ TestCase.assertTrue(Arrays.equals(g21, o.g21));
+ TestCase.assertTrue(Arrays.equals(g22, o.g22));
+ TestCase.assertEquals(g23, o.g23);
+ TestCase.assertTrue(Arrays.equals(g24, o.g24));
+ TestCase.assertEquals(g25, o.g25);
+
+ TestCase.assertEquals(f31, o.f31);
+ TestCase.assertEquals(f32, o.f32);
+ TestCase.assertEquals(f33, o.f33);
+ TestCase.assertEquals(f34, o.f34);
+ TestCase.assertEquals(f35, o.f35);
+ TestCase.assertEquals(f36, o.f36);
+ TestCase.assertEquals(f37, o.f37);
+ TestCase.assertEquals(f38, o.f38);
+ TestCase.assertEquals(f39, o.f39);
+ TestCase.assertEquals(f40, o.f40);
+
+ checkSameIfNonNull(o.f31, o.f11);
+ checkSameIfNonNull(o.f32, o.f12);
+ checkSameIfNonNull(o.f33, o.f13);
+ checkSameIfNonNull(o.f34, o.f14);
+ checkSameIfNonNull(o.f35, o.f15);
+ checkSameIfNonNull(o.f36, o.f16);
+ checkSameIfNonNull(o.f37, o.f17);
+ checkSameIfNonNull(o.f38, o.f18);
+ checkSameIfNonNull(o.f39, o.f19);
+ checkSameIfNonNull(o.f40, o.f20);
+ }
+ }
+
+ public void testSecKeyRefToPriKey()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ SecKeyRefToPriKey obj = new SecKeyRefToPriKey();
+ checkEntity(SecKeyRefToPriKey.class, obj);
+
+ checkMetadata(SecKeyRefToPriKey.class.getName(), new String[][] {
+ {"priKey", "java.lang.String"},
+ {"secKey1", "java.lang.String"},
+ {"secKey2", String[].class.getName()},
+ {"secKey3", Set.class.getName()},
+ },
+ 0 /*priKeyIndex*/, null);
+
+ checkSecKey(obj, "secKey1", obj.secKey1, String.class);
+ checkSecMultiKey(obj, "secKey2", toSet(obj.secKey2), String.class);
+ checkSecMultiKey(obj, "secKey3", toSet(obj.secKey3), String.class);
+
+ close();
+ }
+
+ @Entity
+ static class SecKeyRefToPriKey implements MyEntity {
+
+ @PrimaryKey
+ private final String priKey;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ private final String secKey1;
+
+ @SecondaryKey(relate=ONE_TO_MANY)
+ private final String[] secKey2;
+
+ @SecondaryKey(relate=ONE_TO_MANY)
+ private final Set<String> secKey3 = new HashSet<String>();
+
+ private SecKeyRefToPriKey() {
+ priKey = "sharedValue";
+ secKey1 = priKey;
+ secKey2 = new String[] { priKey };
+ secKey3.add(priKey);
+ }
+
+ public Object getPriKeyObject() {
+ return priKey;
+ }
+
+ public void validate(Object other) {
+ SecKeyRefToPriKey o = (SecKeyRefToPriKey) other;
+ TestCase.assertEquals(priKey, o.priKey);
+ TestCase.assertNotNull(o.secKey1);
+ TestCase.assertEquals(1, o.secKey2.length);
+ TestCase.assertEquals(1, o.secKey3.size());
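+ /* After deserialization, the secondary key values should share the same object as the primary key. */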
+ TestCase.assertSame(o.secKey1, o.priKey);
+ TestCase.assertSame(o.secKey2[0], o.priKey);
+ TestCase.assertSame(o.secKey3.iterator().next(), o.priKey);
+ }
+ }
+
+ public void testSecKeyInSuperclass()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ SecKeyInSuperclassEntity obj = new SecKeyInSuperclassEntity();
+ checkEntity(SecKeyInSuperclassEntity.class, obj);
+
+ checkMetadata(SecKeyInSuperclass.class.getName(),
+ new String[][] {
+ {"priKey", "java.lang.String"},
+ {"secKey1", String.class.getName()},
+ },
+ 0/*priKeyIndex*/, null);
+
+ checkMetadata(SecKeyInSuperclassEntity.class.getName(),
+ new String[][] {
+ {"secKey2", "java.lang.String"},
+ },
+ -1 /*priKeyIndex*/, SecKeyInSuperclass.class.getName());
+
+ checkSecKey
+ (obj, SecKeyInSuperclassEntity.class, "secKey1", obj.secKey1,
+ String.class);
+ checkSecKey
+ (obj, SecKeyInSuperclassEntity.class, "secKey2", obj.secKey2,
+ String.class);
+
+ close();
+ }
+
+ @Persistent
+ static class SecKeyInSuperclass implements MyEntity {
+
+ @PrimaryKey
+ String priKey = "1";
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ String secKey1 = "1";
+
+ public Object getPriKeyObject() {
+ return priKey;
+ }
+
+ public void validate(Object other) {
+ SecKeyInSuperclass o = (SecKeyInSuperclass) other;
+ TestCase.assertEquals(secKey1, o.secKey1);
+ }
+ }
+
+ @Entity
+ static class SecKeyInSuperclassEntity extends SecKeyInSuperclass {
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ String secKey2 = "2";
+
+ @Override
+ public void validate(Object other) {
+ super.validate(other);
+ SecKeyInSuperclassEntity o = (SecKeyInSuperclassEntity) other;
+ TestCase.assertEquals(priKey, o.priKey);
+ TestCase.assertEquals(secKey2, o.secKey2);
+ }
+ }
+
+ public void testSecKeyInSubclass()
+ throws FileNotFoundException, DatabaseException {
+
+ open();
+
+ SecKeyInSubclass obj = new SecKeyInSubclass();
+ checkEntity(SecKeyInSubclassEntity.class, obj);
+
+ checkMetadata(SecKeyInSubclassEntity.class.getName(), new String[][] {
+ {"priKey", "java.lang.String"},
+ {"secKey1", "java.lang.String"},
+ },
+ 0 /*priKeyIndex*/, null);
+
+ checkMetadata(SecKeyInSubclass.class.getName(), new String[][] {
+ {"secKey2", String.class.getName()},
+ },
+ -1 /*priKeyIndex*/,
+ SecKeyInSubclassEntity.class.getName());
+
+ checkSecKey
+ (obj, SecKeyInSubclassEntity.class, "secKey1", obj.secKey1,
+ String.class);
+ checkSecKey
+ (obj, SecKeyInSubclassEntity.class, "secKey2", obj.secKey2,
+ String.class);
+
+ close();
+ }
+
+ @Entity
+ static class SecKeyInSubclassEntity implements MyEntity {
+
+ @PrimaryKey
+ String priKey = "1";
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ String secKey1;
+
+ public Object getPriKeyObject() {
+ return priKey;
+ }
+
+ public void validate(Object other) {
+ SecKeyInSubclassEntity o = (SecKeyInSubclassEntity) other;
+ TestCase.assertEquals(priKey, o.priKey);
+ TestCase.assertEquals(secKey1, o.secKey1);
+ }
+ }
+
+ @Persistent
+ static class SecKeyInSubclass extends SecKeyInSubclassEntity {
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ String secKey2 = "2";
+
+ @Override
+ public void validate(Object other) {
+ super.validate(other);
+ SecKeyInSubclass o = (SecKeyInSubclass) other;
+ TestCase.assertEquals(secKey2, o.secKey2);
+ }
+ }
+
+ private static void checkSameIfNonNull(Object o1, Object o2) {
+ if (o1 != null && o2 != null) {
+ assertSame(o1, o2);
+ }
+ }
+
+ private void checkEntity(Class entityCls, MyEntity entity) {
+ Object priKey = entity.getPriKeyObject();
+ Class keyCls = priKey.getClass();
+ DatabaseEntry keyEntry2 = new DatabaseEntry();
+ DatabaseEntry dataEntry2 = new DatabaseEntry();
+
+ /* Write object, read it back and validate (compare) it. */
+ PersistEntityBinding entityBinding =
+ new PersistEntityBinding(catalog, entityCls.getName(), false);
+ entityBinding.objectToData(entity, dataEntry);
+ entityBinding.objectToKey(entity, keyEntry);
+ Object entity2 = entityBinding.entryToObject(keyEntry, dataEntry);
+ entity.validate(entity2);
+
+ /* Read back the primary key and validate it. */
+ PersistKeyBinding keyBinding =
+ new PersistKeyBinding(catalog, keyCls.getName(), false);
+ Object priKey2 = keyBinding.entryToObject(keyEntry);
+ assertEquals(priKey, priKey2);
+ keyBinding.objectToEntry(priKey2, keyEntry2);
+ assertEquals(keyEntry, keyEntry2);
+
+ /* Check raw entity binding. */
+ PersistEntityBinding rawEntityBinding =
+ new PersistEntityBinding(catalog, entityCls.getName(), true);
+ RawObject rawEntity =
+ (RawObject) rawEntityBinding.entryToObject(keyEntry, dataEntry);
+ rawEntityBinding.objectToKey(rawEntity, keyEntry2);
+ rawEntityBinding.objectToData(rawEntity, dataEntry2);
+ entity2 = entityBinding.entryToObject(keyEntry2, dataEntry2);
+ entity.validate(entity2);
+ RawObject rawEntity2 =
+ (RawObject) rawEntityBinding.entryToObject(keyEntry2, dataEntry2);
+ assertEquals(rawEntity, rawEntity2);
+ assertEquals(dataEntry, dataEntry2);
+ assertEquals(keyEntry, keyEntry2);
+
+ /* Check that raw entity can be converted to a regular entity. */
+ entity2 = catalog.convertRawObject(rawEntity, null);
+ entity.validate(entity2);
+
+ /* Check raw key binding. */
+ PersistKeyBinding rawKeyBinding =
+ new PersistKeyBinding(catalog, keyCls.getName(), true);
+ Object rawKey = rawKeyBinding.entryToObject(keyEntry);
+ rawKeyBinding.objectToEntry(rawKey, keyEntry2);
+ priKey2 = keyBinding.entryToObject(keyEntry2);
+ assertEquals(priKey, priKey2);
+ assertEquals(keyEntry, keyEntry2);
+ }
+
+ private void checkSecKey(MyEntity entity,
+ String keyName,
+ Object keyValue,
+ Class keyCls)
+ throws DatabaseException {
+
+ checkSecKey(entity, entity.getClass(), keyName, keyValue, keyCls);
+ }
+
+ private void checkSecKey(MyEntity entity,
+ Class entityCls,
+ String keyName,
+ Object keyValue,
+ Class keyCls)
+ throws DatabaseException {
+
+ /* Get entity metadata. */
+ EntityMetadata entityMeta =
+ model.getEntityMetadata(entityCls.getName());
+ assertNotNull(entityMeta);
+
+ /* Get secondary key metadata. */
+ SecondaryKeyMetadata secKeyMeta =
+ entityMeta.getSecondaryKeys().get(keyName);
+ assertNotNull(secKeyMeta);
+
+ /* Create key creator/nullifier. */
+ SecondaryKeyCreator keyCreator = new PersistKeyCreator
+ (catalog, entityMeta, keyCls.getName(), secKeyMeta,
+ false /*rawAccess*/);
+
+ /* Convert entity to bytes. */
+ PersistEntityBinding entityBinding =
+ new PersistEntityBinding(catalog, entityCls.getName(), false);
+ entityBinding.objectToData(entity, dataEntry);
+ entityBinding.objectToKey(entity, keyEntry);
+
+ /* Extract secondary key bytes from entity bytes. */
+ DatabaseEntry secKeyEntry = new DatabaseEntry();
+ boolean isKeyPresent = keyCreator.createSecondaryKey
+ (null, keyEntry, dataEntry, secKeyEntry);
+ assertEquals(keyValue != null, isKeyPresent);
+
+ /* Convert secondary key bytes back to an object. */
+ PersistKeyBinding keyBinding =
+ new PersistKeyBinding(catalog, keyCls.getName(), false);
+ if (isKeyPresent) {
+ Object keyValue2 = keyBinding.entryToObject(secKeyEntry);
+ assertEquals(keyValue, keyValue2);
+ DatabaseEntry secKeyEntry2 = new DatabaseEntry();
+ keyBinding.objectToEntry(keyValue2, secKeyEntry2);
+ assertEquals(secKeyEntry, secKeyEntry2);
+ }
+ }
+
+ private void checkSecMultiKey(MyEntity entity,
+ String keyName,
+ Set keyValues,
+ Class keyCls)
+ throws DatabaseException {
+
+ /* Get entity metadata. */
+ Class entityCls = entity.getClass();
+ EntityMetadata entityMeta =
+ model.getEntityMetadata(entityCls.getName());
+ assertNotNull(entityMeta);
+
+ /* Get secondary key metadata. */
+ SecondaryKeyMetadata secKeyMeta =
+ entityMeta.getSecondaryKeys().get(keyName);
+ assertNotNull(secKeyMeta);
+
+ /* Create key creator/nullifier. */
+ SecondaryMultiKeyCreator keyCreator = new PersistKeyCreator
+ (catalog, entityMeta, keyCls.getName(), secKeyMeta,
+ false /*rawAccess*/);
+
+ /* Convert entity to bytes. */
+ PersistEntityBinding entityBinding =
+ new PersistEntityBinding(catalog, entityCls.getName(), false);
+ entityBinding.objectToData(entity, dataEntry);
+ entityBinding.objectToKey(entity, keyEntry);
+
+ /* Extract secondary key bytes from entity bytes. */
+ Set<DatabaseEntry> results = new HashSet<DatabaseEntry>();
+ keyCreator.createSecondaryKeys
+ (null, keyEntry, dataEntry, results);
+ assertEquals(keyValues.size(), results.size());
+
+ /* Convert secondary key bytes back to objects. */
+ PersistKeyBinding keyBinding =
+ new PersistKeyBinding(catalog, keyCls.getName(), false);
+ Set keyValues2 = new HashSet();
+ for (DatabaseEntry secKeyEntry : results) {
+ Object keyValue2 = keyBinding.entryToObject(secKeyEntry);
+ keyValues2.add(keyValue2);
+ }
+ assertEquals(keyValues, keyValues2);
+ }
+
+ private void nullifySecKey(MyEntity entity,
+ String keyName,
+ Object keyValue,
+ Class keyCls)
+ throws DatabaseException {
+
+ /* Get entity metadata. */
+ Class entityCls = entity.getClass();
+ EntityMetadata entityMeta =
+ model.getEntityMetadata(entityCls.getName());
+ assertNotNull(entityMeta);
+
+ /* Get secondary key metadata. */
+ SecondaryKeyMetadata secKeyMeta =
+ entityMeta.getSecondaryKeys().get(keyName);
+ assertNotNull(secKeyMeta);
+
+ /* Create key creator/nullifier. */
+ ForeignMultiKeyNullifier keyNullifier = new PersistKeyCreator
+ (catalog, entityMeta, keyCls.getName(), secKeyMeta,
+ false /*rawAccess*/);
+
+ /* Convert entity to bytes. */
+ PersistEntityBinding entityBinding =
+ new PersistEntityBinding(catalog, entityCls.getName(), false);
+ entityBinding.objectToData(entity, dataEntry);
+ entityBinding.objectToKey(entity, keyEntry);
+
+ /* Convert secondary key to bytes. */
+ PersistKeyBinding keyBinding =
+ new PersistKeyBinding(catalog, keyCls.getName(), false);
+ DatabaseEntry secKeyEntry = new DatabaseEntry();
+ if (keyValue != null) {
+ keyBinding.objectToEntry(keyValue, secKeyEntry);
+ }
+
+ /* Nullify secondary key bytes within entity bytes. */
+ boolean isKeyPresent = keyNullifier.nullifyForeignKey
+ (null, keyEntry, dataEntry, secKeyEntry);
+ assertEquals(keyValue != null, isKeyPresent);
+
+ /* Convert modified entity bytes back to an entity. */
+ Object entity2 = entityBinding.entryToObject(keyEntry, dataEntry);
+ setFieldToNull(entity, keyName);
+ entity.validate(entity2);
+
+ /* Do a full check after nullifying it. */
+ checkSecKey(entity, keyName, null, keyCls);
+ }
+
+ private void nullifySecMultiKey(MyEntity entity,
+ String keyName,
+ Object keyValue,
+ Class keyCls)
+ throws DatabaseException {
+
+ /* Get entity metadata. */
+ Class entityCls = entity.getClass();
+ EntityMetadata entityMeta =
+ model.getEntityMetadata(entityCls.getName());
+ assertNotNull(entityMeta);
+
+ /* Get secondary key metadata. */
+ SecondaryKeyMetadata secKeyMeta =
+ entityMeta.getSecondaryKeys().get(keyName);
+ assertNotNull(secKeyMeta);
+
+ /* Create key creator/nullifier. */
+ ForeignMultiKeyNullifier keyNullifier = new PersistKeyCreator
+ (catalog, entityMeta, keyCls.getName(), secKeyMeta,
+ false /*rawAccess*/);
+
+ /* Convert entity to bytes. */
+ PersistEntityBinding entityBinding =
+ new PersistEntityBinding(catalog, entityCls.getName(), false);
+ entityBinding.objectToData(entity, dataEntry);
+ entityBinding.objectToKey(entity, keyEntry);
+
+ /* Get secondary key binding. */
+ PersistKeyBinding keyBinding =
+ new PersistKeyBinding(catalog, keyCls.getName(), false);
+ DatabaseEntry secKeyEntry = new DatabaseEntry();
+
+ /* Nullify one key value at a time until all of them are gone. */
+ while (true) {
+ Object fieldObj = getField(entity, keyName);
+ fieldObj = nullifyFirstElement(fieldObj, keyBinding, secKeyEntry);
+ if (fieldObj == null) {
+ break;
+ }
+ setField(entity, keyName, fieldObj);
+
+ /* Nullify secondary key bytes within entity bytes. */
+ boolean isKeyPresent = keyNullifier.nullifyForeignKey
+ (null, keyEntry, dataEntry, secKeyEntry);
+ assertEquals(keyValue != null, isKeyPresent);
+
+ /* Convert modified entity bytes back to an entity. */
+ Object entity2 = entityBinding.entryToObject(keyEntry, dataEntry);
+ entity.validate(entity2);
+
+ /* Do a full check after nullifying it. */
+ Set keyValues;
+ if (fieldObj instanceof Set) {
+ keyValues = (Set) fieldObj;
+ } else if (fieldObj instanceof Object[]) {
+ keyValues = toSet((Object[]) fieldObj);
+ } else if (fieldObj instanceof int[]) {
+ keyValues = toSet((int[]) fieldObj);
+ } else {
+ throw new IllegalStateException(fieldObj.getClass().getName());
+ }
+ checkSecMultiKey(entity, keyName, keyValues, keyCls);
+ }
+ }
+
+ /**
+ * Nullifies the first element of an array or collection object by removing
+ * it from the array or collection. Returns the resulting array or
+ * collection. Also outputs the removed element to the keyEntry using the
+ * keyBinding.
+ */
+ private Object nullifyFirstElement(Object obj,
+ EntryBinding keyBinding,
+ DatabaseEntry keyEntry) {
+ if (obj instanceof Collection) {
+ Iterator i = ((Collection) obj).iterator();
+ if (i.hasNext()) {
+ Object elem = i.next();
+ i.remove();
+ keyBinding.objectToEntry(elem, keyEntry);
+ return obj;
+ } else {
+ return null;
+ }
+ } else if (obj instanceof Object[]) {
+ Object[] a1 = (Object[]) obj;
+ if (a1.length > 0) {
+ Object[] a2 = (Object[]) Array.newInstance
+ (obj.getClass().getComponentType(), a1.length - 1);
+ System.arraycopy(a1, 1, a2, 0, a2.length);
+ keyBinding.objectToEntry(a1[0], keyEntry);
+ return a2;
+ } else {
+ return null;
+ }
+ } else if (obj instanceof int[]) {
+ int[] a1 = (int[]) obj;
+ if (a1.length > 0) {
+ int[] a2 = new int[a1.length - 1];
+ System.arraycopy(a1, 1, a2, 0, a2.length);
+ keyBinding.objectToEntry(a1[0], keyEntry);
+ return a2;
+ } else {
+ return null;
+ }
+ } else {
+ throw new IllegalStateException(obj.getClass().getName());
+ }
+ }
+
+ private void checkMetadata(String clsName,
+ String[][] nameTypePairs,
+ int priKeyIndex,
+ String superClsName)
+ throws DatabaseException {
+
+ /* Check metadata/types against the live model. */
+ checkMetadata
+ (catalog, model, clsName, nameTypePairs, priKeyIndex,
+ superClsName);
+
+ /*
+ * Open a catalog that uses the stored model.
+ */
+ PersistCatalog storedCatalog = new PersistCatalog
+ (null, env, STORE_PREFIX, STORE_PREFIX + "catalog", null, null,
+ null, false /*useCurrentModel*/, null /*Store*/);
+ EntityModel storedModel = storedCatalog.getResolvedModel();
+
+ /* Check metadata/types against the stored catalog/model. */
+ checkMetadata
+ (storedCatalog, storedModel, clsName, nameTypePairs, priKeyIndex,
+ superClsName);
+
+ storedCatalog.close();
+ }
+
+ private void checkMetadata(PersistCatalog checkCatalog,
+ EntityModel checkModel,
+ String clsName,
+ String[][] nameTypePairs,
+ int priKeyIndex,
+ String superClsName) {
+ ClassMetadata classMeta = checkModel.getClassMetadata(clsName);
+ assertNotNull(clsName, classMeta);
+
+ PrimaryKeyMetadata priKeyMeta = classMeta.getPrimaryKey();
+ if (priKeyIndex >= 0) {
+ assertNotNull(priKeyMeta);
+ String fieldName = nameTypePairs[priKeyIndex][0];
+ String fieldType = nameTypePairs[priKeyIndex][1];
+ assertEquals(priKeyMeta.getName(), fieldName);
+ assertEquals(priKeyMeta.getClassName(), fieldType);
+ assertEquals(priKeyMeta.getDeclaringClassName(), clsName);
+ assertNull(priKeyMeta.getSequenceName());
+ } else {
+ assertNull(priKeyMeta);
+ }
+
+ RawType type = checkCatalog.getFormat(clsName);
+ assertNotNull(type);
+ assertEquals(clsName, type.getClassName());
+ assertEquals(0, type.getVersion());
+ assertTrue(!type.isSimple());
+ assertTrue(!type.isPrimitive());
+ assertTrue(!type.isEnum());
+ assertNull(type.getEnumConstants());
+ assertTrue(!type.isArray());
+ assertEquals(0, type.getDimensions());
+ assertNull(type.getComponentType());
+ RawType superType = type.getSuperType();
+ if (superClsName != null) {
+ assertNotNull(superType);
+ assertEquals(superClsName, superType.getClassName());
+ } else {
+ assertNull(superType);
+ }
+
+ Map<String,RawField> fields = type.getFields();
+ assertNotNull(fields);
+
+ int nFields = nameTypePairs.length;
+ assertEquals(nFields, fields.size());
+
+ for (String[] pair : nameTypePairs) {
+ String fieldName = pair[0];
+ String fieldType = pair[1];
+ Class fieldCls;
+ try {
+ fieldCls = SimpleCatalog.classForName(fieldType);
+ } catch (ClassNotFoundException e) {
+ fail(e.toString());
+ return; /* For compiler */
+ }
+ RawField field = fields.get(fieldName);
+ assertNotNull(field);
+ assertEquals(fieldName, field.getName());
+ type = field.getType();
+ assertNotNull(type);
+ int dim = getArrayDimensions(fieldType);
+ while (dim > 0) {
+ assertEquals(dim, type.getDimensions());
+ assertEquals(dim, getArrayDimensions(fieldType));
+ assertEquals(true, type.isArray());
+ assertEquals(fieldType, type.getClassName());
+ assertEquals(0, type.getVersion());
+ assertTrue(!type.isSimple());
+ assertTrue(!type.isPrimitive());
+ assertTrue(!type.isEnum());
+ assertNull(type.getEnumConstants());
+ fieldType = getArrayComponent(fieldType, dim);
+ type = type.getComponentType();
+ assertNotNull(fieldType, type);
+ dim -= 1;
+ }
+ assertEquals(fieldType, type.getClassName());
+ List<String> enums = getEnumConstants(fieldType);
+ assertEquals(isSimpleType(fieldType), type.isSimple());
+ assertEquals(isPrimitiveType(fieldType), type.isPrimitive());
+ assertNull(type.getComponentType());
+ assertTrue(!type.isArray());
+ assertEquals(0, type.getDimensions());
+ if (enums != null) {
+ assertTrue(type.isEnum());
+ assertEquals(enums, type.getEnumConstants());
+ assertNull(type.getSuperType());
+ } else {
+ assertTrue(!type.isEnum());
+ assertNull(type.getEnumConstants());
+ }
+ }
+ }
+
+ private List<String> getEnumConstants(String clsName) {
+ if (isPrimitiveType(clsName)) {
+ return null;
+ }
+ Class cls;
+ try {
+ cls = Class.forName(clsName);
+ } catch (ClassNotFoundException e) {
+ fail(e.toString());
+ return null; /* Never happens. */
+ }
+ if (!cls.isEnum()) {
+ return null;
+ }
+ List<String> enums = new ArrayList<String>();
+ Object[] vals = cls.getEnumConstants();
+ for (Object val : vals) {
+ enums.add(val.toString());
+ }
+ return enums;
+ }
+
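+ /** Strips one '[' from a JVM-style array class name; when only one dimension remains, maps the element descriptor (e.g. 'I' or 'Ljava.lang.String;') to a class name. */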
+ private String getArrayComponent(String clsName, int dim) {
+ clsName = clsName.substring(1);
+ if (dim > 1) {
+ return clsName;
+ }
+ if (clsName.charAt(0) == 'L' &&
+ clsName.charAt(clsName.length() - 1) == ';') {
+ return clsName.substring(1, clsName.length() - 1);
+ }
+ if (clsName.length() != 1) {
+ fail();
+ }
+ switch (clsName.charAt(0)) {
+ case 'Z': return "boolean";
+ case 'B': return "byte";
+ case 'C': return "char";
+ case 'D': return "double";
+ case 'F': return "float";
+ case 'I': return "int";
+ case 'J': return "long";
+ case 'S': return "short";
+ default: fail();
+ }
+ return null; /* Should never happen. */
+ }
+
+ private static int getArrayDimensions(String clsName) {
+ int i = 0;
+ while (clsName.charAt(i) == '[') {
+ i += 1;
+ }
+ return i;
+ }
+
+ private static boolean isSimpleType(String clsName) {
+ return isPrimitiveType(clsName) ||
+ clsName.equals("java.lang.Boolean") ||
+ clsName.equals("java.lang.Character") ||
+ clsName.equals("java.lang.Byte") ||
+ clsName.equals("java.lang.Short") ||
+ clsName.equals("java.lang.Integer") ||
+ clsName.equals("java.lang.Long") ||
+ clsName.equals("java.lang.Float") ||
+ clsName.equals("java.lang.Double") ||
+ clsName.equals("java.lang.String") ||
+ clsName.equals("java.math.BigInteger") ||
+ //clsName.equals("java.math.BigDecimal") ||
+ clsName.equals("java.util.Date");
+ }
+
+ private static boolean isPrimitiveType(String clsName) {
+ return clsName.equals("boolean") ||
+ clsName.equals("char") ||
+ clsName.equals("byte") ||
+ clsName.equals("short") ||
+ clsName.equals("int") ||
+ clsName.equals("long") ||
+ clsName.equals("float") ||
+ clsName.equals("double");
+ }
+
+ interface MyEntity {
+ Object getPriKeyObject();
+ void validate(Object other);
+ }
+
+ private static boolean nullOrEqual(Object o1, Object o2) {
+ return (o1 != null) ? o1.equals(o2) : (o2 == null);
+ }
+
+ private static String arrayToString(Object[] array) {
+ StringBuffer buf = new StringBuffer();
+ buf.append('[');
+ for (Object o : array) {
+ if (o instanceof Object[]) {
+ buf.append(arrayToString((Object[]) o));
+ } else {
+ buf.append(o);
+ }
+ buf.append(',');
+ }
+ buf.append(']');
+ return buf.toString();
+ }
+
+ private void setFieldToNull(Object obj, String fieldName) {
+ try {
+ Field field = obj.getClass().getDeclaredField(fieldName);
+ field.setAccessible(true);
+ field.set(obj, null);
+ } catch (NoSuchFieldException e) {
+ fail(e.toString());
+ } catch (IllegalAccessException e) {
+ fail(e.toString());
+ }
+ }
+
+ private void setField(Object obj, String fieldName, Object fieldValue) {
+ try {
+ Field field = obj.getClass().getDeclaredField(fieldName);
+ field.setAccessible(true);
+ field.set(obj, fieldValue);
+ } catch (NoSuchFieldException e) {
+ throw new IllegalStateException(e.toString());
+ } catch (IllegalAccessException e) {
+ throw new IllegalStateException(e.toString());
+ }
+ }
+
+ private Object getField(Object obj, String fieldName) {
+ try {
+ Field field = obj.getClass().getDeclaredField(fieldName);
+ field.setAccessible(true);
+ return field.get(obj);
+ } catch (NoSuchFieldException e) {
+ throw new IllegalStateException(e.toString());
+ } catch (IllegalAccessException e) {
+ throw new IllegalStateException(e.toString());
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced0.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced0.java
new file mode 100644
index 0000000..f29a24c
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced0.java
@@ -0,0 +1,36 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+
+/**
+ * For running ASMifier -- before any enhancements.
+ */
+@Entity
+class Enhanced0 {
+
+ @PrimaryKey
+ private String f1;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private int f2;
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private String f3;
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private String f4;
+
+ private int f5;
+ private String f6;
+ private String f7;
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced1.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced1.java
new file mode 100644
index 0000000..2eb93ba
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced1.java
@@ -0,0 +1,260 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+import com.sleepycat.persist.impl.Enhanced;
+import com.sleepycat.persist.impl.EnhancedAccessor;
+import com.sleepycat.persist.impl.EntityInput;
+import com.sleepycat.persist.impl.EntityOutput;
+import com.sleepycat.persist.impl.Format;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+
+/**
+ * For running ASMifier -- adds minimal enhancements.
+ */
+@Entity
+class Enhanced1 implements Enhanced {
+
+ @PrimaryKey
+ private String f1;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private int f2;
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private String f3;
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private String f4;
+
+ private int f5;
+ private String f6;
+ private String f7;
+ private int f8;
+ private int f9;
+ private int f10;
+ private int f11;
+ private int f12;
+
+ static {
+ EnhancedAccessor.registerClass(null, new Enhanced1());
+ }
+
+ public Object bdbNewInstance() {
+ return new Enhanced1();
+ }
+
+ public Object bdbNewArray(int len) {
+ return new Enhanced1[len];
+ }
+
+ public boolean bdbIsPriKeyFieldNullOrZero() {
+ return f1 == null;
+ }
+
+ public void bdbWritePriKeyField(EntityOutput output, Format format) {
+ output.writeKeyObject(f1, format);
+ }
+
+ public void bdbReadPriKeyField(EntityInput input, Format format) {
+ f1 = (String) input.readKeyObject(format);
+ }
+
+ public void bdbWriteSecKeyFields(EntityOutput output) {
+ /* If primary key is an object: */
+ output.registerPriKeyObject(f1);
+ /* Always: */
+ output.writeInt(f2);
+ output.writeObject(f3, null);
+ output.writeObject(f4, null);
+ }
+
+ public void bdbReadSecKeyFields(EntityInput input,
+ int startField,
+ int endField,
+ int superLevel) {
+ /* If primary key is an object: */
+ input.registerPriKeyObject(f1);
+
+ if (superLevel <= 0) {
+ switch (startField) {
+ case 0:
+ f2 = input.readInt();
+ if (endField == 0) break;
+ case 1:
+ f3 = (String) input.readObject();
+ if (endField == 1) break;
+ case 2:
+ f4 = (String) input.readObject();
+ }
+ }
+ }
+
+ public void bdbWriteNonKeyFields(EntityOutput output) {
+ output.writeInt(f5);
+ output.writeObject(f6, null);
+ output.writeObject(f7, null);
+ output.writeInt(f8);
+ output.writeInt(f9);
+ output.writeInt(f10);
+ output.writeInt(f11);
+ output.writeInt(f12);
+ }
+
+ public void bdbReadNonKeyFields(EntityInput input,
+ int startField,
+ int endField,
+ int superLevel) {
+ if (superLevel <= 0) {
+ switch (startField) {
+ case 0:
+ f5 = input.readInt();
+ if (endField == 0) break;
+ case 1:
+ f6 = (String) input.readObject();
+ if (endField == 1) break;
+ case 2:
+ f7 = (String) input.readObject();
+ if (endField == 2) break;
+ case 3:
+ f8 = input.readInt();
+ if (endField == 3) break;
+ case 4:
+ f9 = input.readInt();
+ if (endField == 4) break;
+ case 5:
+ f10 = input.readInt();
+ if (endField == 5) break;
+ case 6:
+ f11 = input.readInt();
+ if (endField == 6) break;
+ case 7:
+ f12 = input.readInt();
+ }
+ }
+ }
+
+ public void bdbWriteCompositeKeyFields(EntityOutput output,
+ Format[] formats) {
+ }
+
+ public void bdbReadCompositeKeyFields(EntityInput input,
+ Format[] formats) {
+ }
+
+ public boolean bdbNullifyKeyField(Object o,
+ int field,
+ int superLevel,
+ boolean isSecField,
+ Object keyElement) {
+ if (superLevel > 0) {
+ return false;
+ } else if (isSecField) {
+ switch (field) {
+ case 1:
+ if (f3 != null) {
+ f3 = null;
+ return true;
+ } else {
+ return false;
+ }
+ case 2:
+ if (f4 != null) {
+ f4 = null;
+ return true;
+ } else {
+ return false;
+ }
+ default:
+ return false;
+ }
+ } else {
+ switch (field) {
+ case 1:
+ if (f6 != null) {
+ f6 = null;
+ return true;
+ } else {
+ return false;
+ }
+ case 2:
+ if (f7 != null) {
+ f7 = null;
+ return true;
+ } else {
+ return false;
+ }
+ default:
+ return false;
+ }
+ }
+ }
+
+ public Object bdbGetField(Object o,
+ int field,
+ int superLevel,
+ boolean isSecField) {
+ if (superLevel > 0) {
+ } else if (isSecField) {
+ switch (field) {
+ case 0:
+ return Integer.valueOf(f2);
+ case 1:
+ return f3;
+ case 2:
+ return f4;
+ }
+ } else {
+ switch (field) {
+ case 0:
+ return Integer.valueOf(f5);
+ case 1:
+ return f6;
+ case 2:
+ return f7;
+ }
+ }
+ return null;
+ }
+
+ public void bdbSetField(Object o,
+ int field,
+ int superLevel,
+ boolean isSecField,
+ Object value) {
+ if (superLevel > 0) {
+ } else if (isSecField) {
+ switch (field) {
+ case 0:
+ f2 = ((Integer) value).intValue();
+ return;
+ case 1:
+ f3 = (String) value;
+ return;
+ case 2:
+ f4 = (String) value;
+ return;
+ }
+ } else {
+ switch (field) {
+ case 0:
+ f5 = ((Integer) value).intValue();
+ return;
+ case 1:
+ f6 = (String) value;
+ return;
+ case 2:
+ f7 = (String) value;
+ return;
+ }
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced2.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced2.java
new file mode 100644
index 0000000..5cc2e06
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced2.java
@@ -0,0 +1,110 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.persist.test;
+
+import com.sleepycat.persist.impl.EnhancedAccessor;
+import com.sleepycat.persist.impl.EntityInput;
+import com.sleepycat.persist.impl.EntityOutput;
+import com.sleepycat.persist.impl.Format;
+import com.sleepycat.persist.model.Persistent;
+
+/**
+ * For running ASMifier -- entity subclass.
+ */
+@Persistent
+class Enhanced2 extends Enhanced1 {
+
+ static {
+ EnhancedAccessor.registerClass(null, new Enhanced2());
+ }
+
+ public Object bdbNewInstance() {
+ return new Enhanced2();
+ }
+
+ public Object bdbNewArray(int len) {
+ return new Enhanced2[len];
+ }
+
+ public boolean bdbIsPriKeyFieldNullOrZero() {
+ return super.bdbIsPriKeyFieldNullOrZero();
+ }
+
+ public void bdbWritePriKeyField(EntityOutput output, Format format) {
+ super.bdbWritePriKeyField(output, format);
+ }
+
+ public void bdbReadPriKeyField(EntityInput input, Format format) {
+ super.bdbReadPriKeyField(input, format);
+ }
+
+ public void bdbWriteSecKeyFields(EntityOutput output) {
+ super.bdbWriteSecKeyFields(output);
+ }
+
+ public void bdbReadSecKeyFields(EntityInput input,
+ int startField,
+ int endField,
+ int superLevel) {
+ if (superLevel != 0) {
+ super.bdbReadSecKeyFields
+ (input, startField, endField, superLevel - 1);
+ }
+ }
+
+ public void bdbWriteNonKeyFields(EntityOutput output) {
+ super.bdbWriteNonKeyFields(output);
+ }
+
+ public void bdbReadNonKeyFields(EntityInput input,
+ int startField,
+ int endField,
+ int superLevel) {
+ if (superLevel != 0) {
+ super.bdbReadNonKeyFields
+ (input, startField, endField, superLevel - 1);
+ }
+ }
+
+ public boolean bdbNullifyKeyField(Object o,
+ int field,
+ int superLevel,
+ boolean isSecField,
+ Object keyElement) {
+ if (superLevel > 0) {
+ return super.bdbNullifyKeyField
+ (o, field, superLevel - 1, isSecField, keyElement);
+ } else {
+ return false;
+ }
+ }
+
+ public Object bdbGetField(Object o,
+ int field,
+ int superLevel,
+ boolean isSecField) {
+ if (superLevel > 0) {
+ return super.bdbGetField
+ (o, field, superLevel - 1, isSecField);
+ } else {
+ return null;
+ }
+ }
+
+ public void bdbSetField(Object o,
+ int field,
+ int superLevel,
+ boolean isSecField,
+ Object value) {
+ if (superLevel > 0) {
+ super.bdbSetField
+ (o, field, superLevel - 1, isSecField, value);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced3.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced3.java
new file mode 100644
index 0000000..5b59a29
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/Enhanced3.java
@@ -0,0 +1,176 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.persist.test;
+
+/*
+import java.math.BigDecimal;
+*/
+import java.math.BigInteger;
+import java.util.Date;
+
+import com.sleepycat.persist.impl.Enhanced;
+import com.sleepycat.persist.impl.EnhancedAccessor;
+import com.sleepycat.persist.impl.EntityInput;
+import com.sleepycat.persist.impl.EntityOutput;
+import com.sleepycat.persist.impl.Format;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+
+/**
+ * For running ASMifier -- a composite key class using all simple data types;
+ * it does not follow from the previous EnhancedN.java files.
+ */
+@Persistent
+class Enhanced3 implements Enhanced {
+
+ enum MyEnum { ONE, TWO };
+
+ @KeyField(1) boolean z;
+ @KeyField(2) char c;
+ @KeyField(3) byte b;
+ @KeyField(4) short s;
+ @KeyField(5) int i;
+ @KeyField(6) long l;
+ @KeyField(7) float f;
+ @KeyField(8) double d;
+
+ @KeyField(9) Boolean zw;
+ @KeyField(10) Character cw;
+ @KeyField(11) Byte bw;
+ @KeyField(12) Short sw;
+ @KeyField(13) Integer iw;
+ @KeyField(14) Long lw;
+ @KeyField(15) Float fw;
+ @KeyField(16) Double dw;
+
+ @KeyField(17) Date date;
+ @KeyField(18) String str;
+ @KeyField(19) MyEnum e;
+ @KeyField(20) BigInteger bigint;
+ /*
+ @KeyField(21) BigDecimal bigdec;
+ */
+
+ static {
+ EnhancedAccessor.registerClass(null, new Enhanced3());
+ }
+
+ public Object bdbNewInstance() {
+ return new Enhanced3();
+ }
+
+ public Object bdbNewArray(int len) {
+ return new Enhanced3[len];
+ }
+
+ public boolean bdbIsPriKeyFieldNullOrZero() {
+ return false;
+ }
+
+ public void bdbWritePriKeyField(EntityOutput output, Format format) {
+ }
+
+ public void bdbReadPriKeyField(EntityInput input, Format format) {
+ }
+
+ public void bdbWriteSecKeyFields(EntityOutput output) {
+ }
+
+ public void bdbReadSecKeyFields(EntityInput input,
+ int startField,
+ int endField,
+ int superLevel) {
+ }
+
+ public void bdbWriteNonKeyFields(EntityOutput output) {
+ }
+
+ public void bdbReadNonKeyFields(EntityInput input,
+ int startField,
+ int endField,
+ int superLevel) {
+ }
+
+ public void bdbWriteCompositeKeyFields(EntityOutput output,
+ Format[] formats) {
+ output.writeBoolean(z);
+ output.writeChar(c);
+ output.writeByte(b);
+ output.writeShort(s);
+ output.writeInt(i);
+ output.writeLong(l);
+ output.writeSortedFloat(f);
+ output.writeSortedDouble(d);
+
+ output.writeBoolean(zw.booleanValue());
+ output.writeChar(cw.charValue());
+ output.writeByte(bw.byteValue());
+ output.writeShort(sw.shortValue());
+ output.writeInt(iw.intValue());
+ output.writeLong(lw.longValue());
+ output.writeSortedFloat(fw.floatValue());
+ output.writeSortedDouble(dw.doubleValue());
+
+ output.writeLong(date.getTime());
+ output.writeString(str);
+ output.writeKeyObject(e, formats[18]);
+ output.writeBigInteger(bigint);
+ }
+
+ public void bdbReadCompositeKeyFields(EntityInput input,
+ Format[] formats) {
+ z = input.readBoolean();
+ c = input.readChar();
+ b = input.readByte();
+ s = input.readShort();
+ i = input.readInt();
+ l = input.readLong();
+ f = input.readSortedFloat();
+ d = input.readSortedDouble();
+
+ zw = Boolean.valueOf(input.readBoolean());
+ cw = Character.valueOf(input.readChar());
+ bw = Byte.valueOf(input.readByte());
+ sw = Short.valueOf(input.readShort());
+ iw = Integer.valueOf(input.readInt());
+ lw = Long.valueOf(input.readLong());
+ fw = Float.valueOf(input.readSortedFloat());
+ dw = Double.valueOf(input.readSortedDouble());
+
+ date = new Date(input.readLong());
+ str = input.readString();
+ e = (MyEnum) input.readKeyObject(formats[18]);
+ bigint = input.readBigInteger();
+ }
+
+ public boolean bdbNullifyKeyField(Object o,
+ int field,
+ int superLevel,
+ boolean isSecField,
+ Object keyElement) {
+ // Didn't bother with this one.
+ return false;
+ }
+
+ public Object bdbGetField(Object o,
+ int field,
+ int superLevel,
+ boolean isSecField) {
+ // Didn't bother with this one.
+ return null;
+ }
+
+ public void bdbSetField(Object o,
+ int field,
+ int superLevel,
+ boolean isSecField,
+ Object value) {
+ // Didn't bother with this one.
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveCase.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveCase.java
new file mode 100644
index 0000000..4451efa
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveCase.java
@@ -0,0 +1,205 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+package com.sleepycat.persist.test;
+
+import java.util.Iterator;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.model.ClassMetadata;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.raw.RawStore;
+import com.sleepycat.persist.raw.RawType;
+
+@Persistent
+abstract class EvolveCase {
+
+ static final String STORE_NAME = "foo";
+
+ transient boolean updated;
+
+ Mutations getMutations() {
+ return null;
+ }
+
+ void configure(EntityModel model, StoreConfig config) {
+ }
+
+ String getStoreOpenException() {
+ return null;
+ }
+
+ int getNRecordsExpected() {
+ return 1;
+ }
+
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ }
+
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ }
+
+ /**
+ * @throws DatabaseException from subclasses.
+ */
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+ }
+
+ /**
+ * @throws DatabaseException from subclasses.
+ */
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+ }
+
+ /**
+ * @throws DatabaseException from subclasses.
+ */
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+ }
+
+ /**
+ * @throws DatabaseException from subclasses.
+ */
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+ }
+
+ /**
+ * Checks for equality and prints the entire values rather than the
+ * abbreviated values that TestCase.assertEquals prints.
+ */
+ static void checkEquals(Object expected, Object got) {
+ if ((expected != null) ? (!expected.equals(got)) : (got != null)) {
+ TestCase.fail("Expected:\n" + expected + "\nBut got:\n" + got);
+ }
+ }
+
+ /**
+ * Asserts that an entity database exists or does not exist.
+ */
+ static void assertDbExists(boolean expectExists,
+ Environment env,
+ String entityClassName) {
+ assertDbExists(expectExists, env, entityClassName, null);
+ }
+
+ /**
+ * Checks that an entity class exists or does not exist.
+ */
+ static void checkEntity(boolean exists,
+ EntityModel model,
+ Environment env,
+ String className,
+ int version,
+ String secKeyName) {
+ if (exists) {
+ TestCase.assertNotNull(model.getEntityMetadata(className));
+ ClassMetadata meta = model.getClassMetadata(className);
+ TestCase.assertNotNull(meta);
+ TestCase.assertEquals(version, meta.getVersion());
+ TestCase.assertTrue(meta.isEntityClass());
+
+ RawType raw = model.getRawType(className);
+ TestCase.assertNotNull(raw);
+ TestCase.assertEquals(version, raw.getVersion());
+ } else {
+ TestCase.assertNull(model.getEntityMetadata(className));
+ TestCase.assertNull(model.getClassMetadata(className));
+ TestCase.assertNull(model.getRawType(className));
+ }
+
+ assertDbExists(exists, env, className);
+ if (secKeyName != null) {
+ assertDbExists(exists, env, className, secKeyName);
+ }
+ }
+
+ /**
+ * Checks that a non-entity class exists or does not exist.
+ */
+ static void checkNonEntity(boolean exists,
+ EntityModel model,
+ Environment env,
+ String className,
+ int version) {
+ if (exists) {
+ ClassMetadata meta = model.getClassMetadata(className);
+ TestCase.assertNotNull(meta);
+ TestCase.assertEquals(version, meta.getVersion());
+ TestCase.assertTrue(!meta.isEntityClass());
+
+ RawType raw = model.getRawType(className);
+ TestCase.assertNotNull(raw);
+ TestCase.assertEquals(version, raw.getVersion());
+ } else {
+ TestCase.assertNull(model.getClassMetadata(className));
+ TestCase.assertNull(model.getRawType(className));
+ }
+
+ TestCase.assertNull(model.getEntityMetadata(className));
+ assertDbExists(false, env, className);
+ }
+
+ /**
+ * Asserts that a database exists or does not exist, as given by
+ * expectExists. If keyName is null, checks an entity database. If
+ * keyName is non-null, checks a secondary database.
+ */
+ static void assertDbExists(boolean expectExists,
+ Environment env,
+ String entityClassName,
+ String keyName) {
+ PersistTestUtils.assertDbExists
+ (expectExists, env, STORE_NAME, entityClassName, keyName);
+ }
+
+ static void checkVersions(EntityModel model, String name, int version) {
+ checkVersions(model, new String[] {name}, new int[] {version});
+ }
+
+ static void checkVersions(EntityModel model,
+ String name1,
+ int version1,
+ String name2,
+ int version2) {
+ checkVersions
+ (model, new String[] {name1, name2},
+ new int[] {version1, version2});
+ }
+
+ private static void checkVersions(EntityModel model,
+ String[] names,
+ int[] versions) {
+ List<RawType> all = model.getAllRawTypeVersions(names[0]);
+ TestCase.assertNotNull(all);
+
+ assert names.length == versions.length;
+ TestCase.assertEquals(all.toString(), names.length, all.size());
+
+ Iterator<RawType> iter = all.iterator();
+ for (int i = 0; i < names.length; i += 1) {
+ RawType type = iter.next();
+ TestCase.assertEquals(versions[i], type.getVersion());
+ TestCase.assertEquals(names[i], type.getClassName());
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveClasses.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveClasses.java
new file mode 100644
index 0000000..058aef3
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveClasses.java
@@ -0,0 +1,6818 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+
+import java.math.BigInteger;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.StringTokenizer;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.evolve.Conversion;
+import com.sleepycat.persist.evolve.Converter;
+import com.sleepycat.persist.evolve.Deleter;
+import com.sleepycat.persist.evolve.EntityConverter;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.evolve.Renamer;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PersistentProxy;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.persist.raw.RawObject;
+import com.sleepycat.persist.raw.RawStore;
+import com.sleepycat.persist.raw.RawType;
+
+/**
+ * Nested classes are modified versions of classes of the same name in
+ * EvolveClasses.java.original. See EvolveTestBase.java for the steps that are
+ * taken to add a new class (test case).
+ *
+ * @author Mark Hayes
+ */
+class EvolveClasses {
+
+ private static final String PREFIX = EvolveClasses.class.getName() + '$';
+ private static final String CASECLS = EvolveCase.class.getName();
+
+ private static RawObject readRaw(RawStore store,
+ Object key,
+ Object... classVersionPairs)
+ throws DatabaseException {
+
+ return readRaw(store, null, key, classVersionPairs);
+ }
+
+ /**
+ * Reads a raw object and checks its superclass names and versions.
+ */
+ private static RawObject readRaw(RawStore store,
+ String entityClsName,
+ Object key,
+ Object... classVersionPairs)
+ throws DatabaseException {
+
+ TestCase.assertNotNull(store);
+ TestCase.assertNotNull(key);
+
+ if (entityClsName == null) {
+ entityClsName = (String) classVersionPairs[0];
+ }
+ PrimaryIndex<Object,RawObject> index =
+ store.getPrimaryIndex(entityClsName);
+ TestCase.assertNotNull(index);
+
+ RawObject obj = index.get(key);
+ TestCase.assertNotNull(obj);
+
+ checkRawType(obj.getType(), classVersionPairs);
+
+ RawObject superObj = obj.getSuper();
+ for (int i = 2; i < classVersionPairs.length; i += 2) {
+ Object[] a = new Object[classVersionPairs.length - i];
+ System.arraycopy(classVersionPairs, i, a, 0, a.length);
+ TestCase.assertNotNull(superObj);
+ checkRawType(superObj.getType(), a);
+ superObj = superObj.getSuper();
+ }
+
+ return obj;
+ }
+
+ /**
+ * Checks a raw type and its supertypes against the given class name and version pairs.
+ */
+ private static void checkRawType(RawType type,
+ Object... classVersionPairs) {
+ TestCase.assertNotNull(type);
+ TestCase.assertNotNull(classVersionPairs);
+ TestCase.assertTrue(classVersionPairs.length % 2 == 0);
+
+ for (int i = 0; i < classVersionPairs.length; i += 2) {
+ String clsName = (String) classVersionPairs[i];
+ int clsVersion = (Integer) classVersionPairs[i + 1];
+ TestCase.assertEquals(clsName, type.getClassName());
+ TestCase.assertEquals(clsVersion, type.getVersion());
+ type = type.getSuperType();
+ }
+ TestCase.assertNull(type);
+ }
+
+ /**
+ * Checks that a raw object contains the specified field values. Does not
+ * check superclass fields.
+ */
+ private static void checkRawFields(RawObject obj,
+ Object... nameValuePairs) {
+ TestCase.assertNotNull(obj);
+ TestCase.assertNotNull(obj.getValues());
+ TestCase.assertNotNull(nameValuePairs);
+ TestCase.assertTrue(nameValuePairs.length % 2 == 0);
+
+ Map<String,Object> values = obj.getValues();
+ TestCase.assertEquals(nameValuePairs.length / 2, values.size());
+
+ for (int i = 0; i < nameValuePairs.length; i += 2) {
+ String name = (String) nameValuePairs[i];
+ Object value = nameValuePairs[i + 1];
+ TestCase.assertEquals(name, value, values.get(name));
+ }
+ }
+
+ private static Map<String,Object> makeValues(Object... nameValuePairs) {
+ TestCase.assertTrue(nameValuePairs.length % 2 == 0);
+ Map<String,Object> values = new HashMap<String,Object>();
+ for (int i = 0; i < nameValuePairs.length; i += 2) {
+ values.put((String) nameValuePairs[i], nameValuePairs[i + 1]);
+ }
+ return values;
+ }
+
+ /**
+ * Disallow removing an entity class when no Deleter mutation is specified.
+ */
+ static class DeletedEntity1_ClassRemoved_NoMutation extends EvolveCase {
+
+ private static final String NAME =
+ PREFIX + "DeletedEntity1_ClassRemoved";
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity1_ClassRemoved version: 0 Error: java.lang.ClassNotFoundException: com.sleepycat.persist.test.EvolveClasses$DeletedEntity1_ClassRemoved";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "skey");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "skey", 88);
+ }
+ }
+
+ /**
+ * Allow removing an entity class when a Deleter mutation is specified.
+ */
+ static class DeletedEntity2_ClassRemoved_WithDeleter extends EvolveCase {
+
+ private static final String NAME =
+ PREFIX + "DeletedEntity2_ClassRemoved";
+
+ @Override
+ int getNRecordsExpected() {
+ return 0;
+ }
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addDeleter(new Deleter(NAME, 0));
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(false, model, env, NAME, 0, "skey");
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 0);
+ }
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ return;
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "skey", 88);
+ }
+ }
+
+ /**
+ * Disallow removing the Entity annotation when no Deleter mutation is
+ * specified.
+ */
+ static class DeletedEntity3_AnnotRemoved_NoMutation extends EvolveCase {
+
+ private static final String NAME =
+ DeletedEntity3_AnnotRemoved_NoMutation.class.getName();
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity3_AnnotRemoved_NoMutation version: 0 Error: java.lang.IllegalArgumentException: Class could not be loaded or is not persistent: com.sleepycat.persist.test.EvolveClasses$DeletedEntity3_AnnotRemoved_NoMutation";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "skey");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "skey", 88);
+ }
+ }
+
+ /**
+ * Allow removing the Entity annotation when a Deleter mutation is
+ * specified.
+ */
+ static class DeletedEntity4_AnnotRemoved_WithDeleter extends EvolveCase {
+
+ private static final String NAME =
+ DeletedEntity4_AnnotRemoved_WithDeleter.class.getName();
+
+ @Override
+ int getNRecordsExpected() {
+ return 0;
+ }
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addDeleter(new Deleter(NAME, 0));
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(false, model, env, NAME, 0, "skey");
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 0);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate) {
+ try {
+ store.getPrimaryIndex
+ (Integer.class,
+ DeletedEntity4_AnnotRemoved_WithDeleter.class);
+ TestCase.fail();
+ } catch (Exception e) {
+ checkEquals
+ ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity4_AnnotRemoved_WithDeleter",
+ e.toString());
+ }
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ return;
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "skey", 88);
+ }
+ }
+
+ /**
+ * Disallow changing the Entity annotation to Persistent when no Deleter
+ * mutation is specified.
+ */
+ @Persistent(version=1)
+ static class DeletedEntity5_EntityToPersist_NoMutation extends EvolveCase {
+
+ private static final String NAME =
+ DeletedEntity5_EntityToPersist_NoMutation.class.getName();
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity5_EntityToPersist_NoMutation version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity5_EntityToPersist_NoMutation version: 1 Error: @Entity switched to/from @Persistent";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "skey");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "skey", 88);
+ }
+ }
+
+ /**
+ * Allow changing the Entity annotation to Persistent when a Deleter
+ * mutation is specified.
+ */
+ @Persistent(version=1)
+ static class DeletedEntity6_EntityToPersist_WithDeleter extends EvolveCase {
+
+ private static final String NAME =
+ DeletedEntity6_EntityToPersist_WithDeleter.class.getName();
+ private static final String NAME2 =
+ Embed_DeletedEntity6_EntityToPersist_WithDeleter.class.getName();
+
+ @Override
+ int getNRecordsExpected() {
+ return 0;
+ }
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addDeleter(new Deleter(NAME, 0));
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkNonEntity(true, model, env, NAME, 1);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ /* Cannot get the primary index for the former entity class. */
+ try {
+ store.getPrimaryIndex
+ (Integer.class,
+ DeletedEntity6_EntityToPersist_WithDeleter.class);
+ TestCase.fail();
+ } catch (Exception e) {
+ checkEquals
+ ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity6_EntityToPersist_WithDeleter",
+ e.toString());
+ }
+
+ /* Can embed the now persistent class in another entity class. */
+ PrimaryIndex<Long,
+ Embed_DeletedEntity6_EntityToPersist_WithDeleter>
+ index = store.getPrimaryIndex
+ (Long.class,
+ Embed_DeletedEntity6_EntityToPersist_WithDeleter.class);
+
+ if (doUpdate) {
+ Embed_DeletedEntity6_EntityToPersist_WithDeleter embed =
+ new Embed_DeletedEntity6_EntityToPersist_WithDeleter();
+ index.put(embed);
+ embed = index.get(embed.key);
+ /* This new type should exist only after update. */
+ Environment env = store.getEnvironment();
+ EntityModel model = store.getModel();
+ checkEntity(true, model, env, NAME2, 0, null);
+ checkVersions(model, NAME2, 0);
+ }
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ return;
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "skey", 88);
+ }
+ }
+
+ @Entity
+ static class Embed_DeletedEntity6_EntityToPersist_WithDeleter {
+
+ @PrimaryKey
+ long key = 99;
+
+ DeletedEntity6_EntityToPersist_WithDeleter embedded =
+ new DeletedEntity6_EntityToPersist_WithDeleter();
+ }
+
+ /**
+ * Disallow removing a Persistent class when no Deleter mutation is
+ * specified, even when the Entity class that embedded the Persistent class
+ * is deleted properly (by removing the Entity annotation in this case).
+ */
+ static class DeletedPersist1_ClassRemoved_NoMutation extends EvolveCase {
+
+ private static final String NAME =
+ PREFIX + "DeletedPersist1_ClassRemoved";
+
+ private static final String NAME2 =
+ DeletedPersist1_ClassRemoved_NoMutation.class.getName();
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addDeleter(new Deleter(NAME2, 0));
+ return m;
+ }
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist1_ClassRemoved version: 0 Error: java.lang.ClassNotFoundException: com.sleepycat.persist.test.EvolveClasses$DeletedPersist1_ClassRemoved";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkNonEntity(true, model, env, NAME, 0);
+ checkEntity(true, model, env, NAME2, 0, null);
+ checkVersions(model, NAME, 0);
+ checkVersions(model, NAME2, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+
+ RawType embedType = store.getModel().getRawType(NAME);
+ checkRawType(embedType, NAME, 0);
+
+ RawObject embed =
+ new RawObject(embedType, makeValues("f", 123), null);
+
+ RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "embed", embed);
+ }
+ }
+
+ /**
+ * Allow removing a Persistent class when a Deleter mutation is
+ * specified, and the Entity class that embedded the Persistent class
+ * is also deleted properly (by removing the Entity annotation in this
+ * case).
+ */
+ static class DeletedPersist2_ClassRemoved_WithDeleter extends EvolveCase {
+
+ private static final String NAME =
+ PREFIX + "DeletedPersist2_ClassRemoved";
+ private static final String NAME2 =
+ DeletedPersist2_ClassRemoved_WithDeleter.class.getName();
+
+ @Override
+ int getNRecordsExpected() {
+ return 0;
+ }
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addDeleter(new Deleter(NAME, 0));
+ m.addDeleter(new Deleter(NAME2, 0));
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkNonEntity(false, model, env, NAME, 0);
+ checkEntity(false, model, env, NAME2, 0, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 0);
+ checkVersions(model, NAME2, 0);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate) {
+ try {
+ store.getPrimaryIndex
+ (Integer.class,
+ DeletedPersist2_ClassRemoved_WithDeleter.class);
+ TestCase.fail();
+ } catch (Exception e) {
+ checkEquals
+ ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist2_ClassRemoved_WithDeleter",
+ e.toString());
+ }
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ return;
+ }
+
+ RawType embedType = store.getModel().getRawType(NAME);
+ checkRawType(embedType, NAME, 0);
+
+ RawObject embed =
+ new RawObject(embedType, makeValues("f", 123), null);
+
+ RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "embed", embed);
+ }
+ }
+
+ static class DeletedPersist3_AnnotRemoved {
+
+ int f = 123;
+ }
+
+ /**
+ * Disallow removing the Persistent annotation when no Deleter mutation is
+ * specified, even when the Entity class that embedded the Persistent class
+ * is deleted properly (by removing the Entity annotation in this case).
+ */
+ static class DeletedPersist3_AnnotRemoved_NoMutation extends EvolveCase {
+
+ private static final String NAME =
+ DeletedPersist3_AnnotRemoved.class.getName();
+ private static final String NAME2 =
+ DeletedPersist3_AnnotRemoved_NoMutation.class.getName();
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addDeleter(new Deleter(NAME2, 0));
+ return m;
+ }
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist3_AnnotRemoved version: 0 Error: java.lang.IllegalArgumentException: Class could not be loaded or is not persistent: com.sleepycat.persist.test.EvolveClasses$DeletedPersist3_AnnotRemoved";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkNonEntity(true, model, env, NAME, 0);
+ checkEntity(true, model, env, NAME2, 0, null);
+ checkVersions(model, NAME, 0);
+ checkVersions(model, NAME2, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+
+ RawType embedType = store.getModel().getRawType(NAME);
+ checkRawType(embedType, NAME, 0);
+
+ RawObject embed =
+ new RawObject(embedType, makeValues("f", 123), null);
+
+ RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "embed", embed);
+ }
+ }
+
+ static class DeletedPersist4_AnnotRemoved {
+
+ int f = 123;
+ }
+
+ /**
+ * Allow removing the Persistent annotation when a Deleter mutation is
+ * specified, and the Entity class that embedded the Persistent class
+ * is also deleted properly (by removing the Entity annotation in this
+ * case).
+ */
+ static class DeletedPersist4_AnnotRemoved_WithDeleter extends EvolveCase {
+
+ private static final String NAME =
+ DeletedPersist4_AnnotRemoved.class.getName();
+ private static final String NAME2 =
+ DeletedPersist4_AnnotRemoved_WithDeleter.class.getName();
+
+ @Override
+ int getNRecordsExpected() {
+ return 0;
+ }
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addDeleter(new Deleter(NAME, 0));
+ m.addDeleter(new Deleter(NAME2, 0));
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkNonEntity(false, model, env, NAME, 0);
+ checkEntity(false, model, env, NAME2, 0, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 0);
+ checkVersions(model, NAME2, 0);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate) {
+ try {
+ store.getPrimaryIndex
+ (Integer.class,
+ DeletedPersist4_AnnotRemoved_WithDeleter.class);
+ TestCase.fail();
+ } catch (Exception e) {
+ checkEquals
+ ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist4_AnnotRemoved_WithDeleter",
+ e.toString());
+ }
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ return;
+ }
+
+ RawType embedType = store.getModel().getRawType(NAME);
+ checkRawType(embedType, NAME, 0);
+
+ RawObject embed =
+ new RawObject(embedType, makeValues("f", 123), null);
+
+ RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "embed", embed);
+ }
+ }
+
+ @Entity(version=1)
+ static class DeletedPersist5_PersistToEntity {
+
+ @PrimaryKey
+ int key = 99;
+
+ int f = 123;
+ }
+
+ /**
+ * Disallow changing the Persistent annotation to Entity when no Deleter
+ * mutation is specified, even when the Entity class that embedded the
+ * Persistent class is deleted properly (by removing the Entity annotation
+ * in this case).
+ */
+ static class DeletedPersist5_PersistToEntity_NoMutation
+ extends EvolveCase {
+
+ private static final String NAME =
+ DeletedPersist5_PersistToEntity.class.getName();
+ private static final String NAME2 =
+ DeletedPersist5_PersistToEntity_NoMutation.class.getName();
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addDeleter(new Deleter(NAME2, 0));
+ return m;
+ }
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist5_PersistToEntity version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist5_PersistToEntity version: 1 Error: @Entity switched to/from @Persistent";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkNonEntity(true, model, env, NAME, 0);
+ checkEntity(true, model, env, NAME2, 0, null);
+ checkVersions(model, NAME, 0);
+ checkVersions(model, NAME2, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+
+ RawType embedType = store.getModel().getRawType(NAME);
+ checkRawType(embedType, NAME, 0);
+
+ RawObject embed =
+ new RawObject(embedType, makeValues("f", 123), null);
+
+ RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "embed", embed);
+ }
+ }
+
+ @Entity(version=1)
+ static class DeletedPersist6_PersistToEntity {
+
+ @PrimaryKey
+ int key = 99;
+
+ int f = 123;
+ }
+
+ /**
+ * Allow changing the Persistent annotation to Entity when a Deleter
+ * mutation is specified, and the Entity class that embedded the Persistent
+ * class is also deleted properly (by removing the Entity annotation in
+ * this case).
+ */
+ static class DeletedPersist6_PersistToEntity_WithDeleter
+ extends EvolveCase {
+
+ private static final String NAME =
+ DeletedPersist6_PersistToEntity.class.getName();
+ private static final String NAME2 =
+ DeletedPersist6_PersistToEntity_WithDeleter.class.getName();
+
+ @Override
+ int getNRecordsExpected() {
+ return 0;
+ }
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addDeleter(new Deleter(NAME, 0));
+ m.addDeleter(new Deleter(NAME2, 0));
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(false, model, env, NAME2, 0, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ checkVersions(model, NAME2, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ /* Cannot get the primary index for the former entity class. */
+ try {
+ store.getPrimaryIndex
+ (Integer.class,
+ DeletedPersist6_PersistToEntity_WithDeleter.class);
+ TestCase.fail();
+ } catch (Exception e) {
+ checkEquals
+ ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist6_PersistToEntity_WithDeleter",
+ e.toString());
+ }
+
+ /* Can use the primary index of the now entity class. */
+ PrimaryIndex<Integer,
+ DeletedPersist6_PersistToEntity>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeletedPersist6_PersistToEntity.class);
+
+ if (doUpdate) {
+ DeletedPersist6_PersistToEntity obj =
+ new DeletedPersist6_PersistToEntity();
+ index.put(obj);
+ obj = index.get(obj.key);
+ /* This new type should exist only after update. */
+ Environment env = store.getEnvironment();
+ EntityModel model = store.getModel();
+ checkEntity(true, model, env, NAME, 1, null);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,
+ DeletedPersist6_PersistToEntity>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ DeletedPersist6_PersistToEntity.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((DeletedPersist6_PersistToEntity)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ return;
+ }
+
+ RawType embedType = store.getModel().getRawType(NAME);
+ checkRawType(embedType, NAME, 0);
+
+ RawObject embed =
+ new RawObject(embedType, makeValues("f", 123), null);
+
+ RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "embed", embed);
+ }
+ }
+
+ /**
+ * Disallow renaming an entity class without a Renamer mutation.
+ */
+ @Entity(version=1)
+ static class RenamedEntity1_NewEntityName_NoMutation
+ extends EvolveCase {
+
+ private static final String NAME =
+ PREFIX + "RenamedEntity1_NewEntityName";
+ private static final String NAME2 =
+ RenamedEntity1_NewEntityName_NoMutation.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey = 88;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$RenamedEntity1_NewEntityName version: 0 Error: java.lang.ClassNotFoundException: com.sleepycat.persist.test.EvolveClasses$RenamedEntity1_NewEntityName";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "skey");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "skey", 88);
+ }
+ }
+
+ /**
+ * Allow renaming an entity class with a Renamer mutation.
+ */
+ @Entity(version=1)
+ static class RenamedEntity2_NewEntityName_WithRenamer
+ extends EvolveCase {
+
+ private static final String NAME =
+ PREFIX + "RenamedEntity2_NewEntityName";
+ private static final String NAME2 =
+ RenamedEntity2_NewEntityName_WithRenamer.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey = 88;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addRenamer(new Renamer(NAME, 0, NAME2));
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(false, model, env, NAME, 0, null);
+ checkEntity(true, model, env, NAME2, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME2, 1, NAME, 0);
+ } else {
+ checkVersions(model, NAME2, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,RenamedEntity2_NewEntityName_WithRenamer>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ RenamedEntity2_NewEntityName_WithRenamer.class);
+ RenamedEntity2_NewEntityName_WithRenamer obj = index.get(key);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertEquals(88, obj.skey);
+
+ SecondaryIndex<Integer,Integer,
+ RenamedEntity2_NewEntityName_WithRenamer>
+ sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+ obj = sindex.get(88);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertEquals(88, obj.skey);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,RenamedEntity2_NewEntityName_WithRenamer>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ RenamedEntity2_NewEntityName_WithRenamer.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99);
+ index.put((RenamedEntity2_NewEntityName_WithRenamer)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj;
+ if (expectEvolved) {
+ obj = readRaw(store, 99, NAME2, 1, CASECLS, 0);
+ } else {
+ obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ }
+ checkRawFields(obj, "key", 99, "skey", 88);
+ }
+ }
+
+ @Persistent
+ static class DeleteSuperclass1_BaseClass
+ extends EvolveCase {
+
+ int f = 123;
+ }
+
+ /**
+ * Disallow deleting a superclass from the hierarchy when the superclass
+ * has persistent fields and no Deleter or Converter is specified.
+ */
+ @Entity
+ static class DeleteSuperclass1_NoMutation
+ extends EvolveCase {
+
+ private static final String NAME =
+ DeleteSuperclass1_BaseClass.class.getName();
+ private static final String NAME2 =
+ DeleteSuperclass1_NoMutation.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeleteSuperclass1_NoMutation version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DeleteSuperclass1_NoMutation version: 0 Error: When a superclass is removed from the class hierarchy, the superclass or all of its persistent fields must be deleted with a Deleter: com.sleepycat.persist.test.EvolveClasses$DeleteSuperclass1_BaseClass";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkNonEntity(true, model, env, NAME, 0);
+ checkEntity(true, model, env, NAME2, 0, null);
+ checkVersions(model, NAME, 0);
+ checkVersions(model, NAME2, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME2, 0, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", 88);
+ checkRawFields(obj.getSuper(), "f", 123);
+ checkRawFields(obj.getSuper().getSuper());
+ }
+ }
+
+ @Persistent
+ static class DeleteSuperclass2_BaseClass
+ extends EvolveCase {
+
+ int f;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey;
+ }
+
+ /**
+ * Allow deleting a superclass from the hierarchy when the superclass has
+ * persistent fields and a class Converter is specified. Also check that
+ * the secondary key field in the deleted base class is handled properly.
+ */
+ @Entity(version=1)
+ static class DeleteSuperclass2_WithConverter extends EvolveCase {
+
+ private static final String NAME =
+ DeleteSuperclass2_BaseClass.class.getName();
+ private static final String NAME2 =
+ DeleteSuperclass2_WithConverter.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ int ff;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Integer skey2;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey3;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addConverter(new EntityConverter
+ (NAME2, 0, new MyConversion(),
+ Collections.singleton("skey")));
+ return m;
+ }
+
+ @SuppressWarnings("serial")
+ static class MyConversion implements Conversion {
+
+ transient RawType newType;
+
+ public void initialize(EntityModel model) {
+ newType = model.getRawType(NAME2);
+ TestCase.assertNotNull(newType);
+ }
+
+ public Object convert(Object fromValue) {
+ TestCase.assertNotNull(newType);
+ RawObject obj = (RawObject) fromValue;
+ RawObject newSuper = obj.getSuper().getSuper();
+ return new RawObject(newType, obj.getValues(), newSuper);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof MyConversion;
+ }
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME2, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME2, 1, NAME2, 0);
+ checkNonEntity(true, model, env, NAME, 0);
+ checkVersions(model, NAME, 0);
+ } else {
+ checkVersions(model, NAME2, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteSuperclass2_WithConverter>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeleteSuperclass2_WithConverter.class);
+ DeleteSuperclass2_WithConverter obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertSame
+ (EvolveCase.class, obj.getClass().getSuperclass());
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertEquals(88, obj.ff);
+ TestCase.assertEquals(Integer.valueOf(77), obj.skey2);
+ TestCase.assertEquals(66, obj.skey3);
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteSuperclass2_WithConverter>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ DeleteSuperclass2_WithConverter.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99);
+ index.put((DeleteSuperclass2_WithConverter)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj;
+ if (expectEvolved) {
+ obj = readRaw(store, 99, NAME2, 1, CASECLS, 0);
+ } else {
+ obj = readRaw(store, 99, NAME2, 0, NAME, 0, CASECLS, 0);
+ }
+ checkRawFields
+ (obj, "key", 99, "ff", 88, "skey2", 77, "skey3", 66);
+ if (expectEvolved) {
+ checkRawFields(obj.getSuper());
+ } else {
+ checkRawFields(obj.getSuper(), "f", 123, "skey", 456);
+ checkRawFields(obj.getSuper().getSuper());
+ }
+ Environment env = store.getEnvironment();
+ assertDbExists(!expectEvolved, env, NAME2, "skey");
+ assertDbExists(true, env, NAME2, "skey3");
+ }
+ }
+
+ static class DeleteSuperclass3_BaseClass
+ extends EvolveCase {
+
+ int f;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey;
+ }
+
+ /**
+ * Allow deleting a superclass from the hierarchy when the superclass
+ * has persistent fields and a class Deleter is specified. Also check that
+ * the secondary key field in the deleted base class is handled properly.
+ */
+ @Entity(version=1)
+ static class DeleteSuperclass3_WithDeleter extends EvolveCase {
+
+ private static final String NAME =
+ DeleteSuperclass3_BaseClass.class.getName();
+ private static final String NAME2 =
+ DeleteSuperclass3_WithDeleter.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ int ff;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addDeleter(new Deleter(NAME, 0));
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME2, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME2, 1, NAME2, 0);
+ checkNonEntity(false, model, env, NAME, 0);
+ checkVersions(model, NAME, 0);
+ } else {
+ checkVersions(model, NAME2, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteSuperclass3_WithDeleter>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeleteSuperclass3_WithDeleter.class);
+ DeleteSuperclass3_WithDeleter obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertSame
+ (EvolveCase.class, obj.getClass().getSuperclass());
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertEquals(88, obj.ff);
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteSuperclass3_WithDeleter>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ DeleteSuperclass3_WithDeleter.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99);
+ index.put((DeleteSuperclass3_WithDeleter)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj;
+ if (expectEvolved) {
+ obj = readRaw(store, 99, NAME2, 1, CASECLS, 0);
+ } else {
+ obj = readRaw(store, 99, NAME2, 0, NAME, 0, CASECLS, 0);
+ }
+ checkRawFields(obj, "key", 99, "ff", 88);
+ if (expectEvolved) {
+ checkRawFields(obj.getSuper());
+ } else {
+ checkRawFields(obj.getSuper(), "f", 123, "skey", 456);
+ checkRawFields(obj.getSuper().getSuper());
+ }
+ Environment env = store.getEnvironment();
+ assertDbExists(!expectEvolved, env, NAME2, "skey");
+ }
+ }
+
+ @Persistent
+ static class DeleteSuperclass4_BaseClass
+ extends EvolveCase {
+ }
+
+ /**
+ * Allow deleting a superclass from the hierarchy when the superclass
+ * has NO persistent fields. No mutations are needed.
+ */
+ @Entity(version=1)
+ static class DeleteSuperclass4_NoFields extends EvolveCase {
+
+ private static final String NAME =
+ DeleteSuperclass4_BaseClass.class.getName();
+ private static final String NAME2 =
+ DeleteSuperclass4_NoFields.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff;
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME2, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME2, 1, NAME2, 0);
+ checkNonEntity(true, model, env, NAME, 0);
+ checkVersions(model, NAME, 0);
+ } else {
+ checkVersions(model, NAME2, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteSuperclass4_NoFields>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeleteSuperclass4_NoFields.class);
+ DeleteSuperclass4_NoFields obj = index.get(key);
+ TestCase.assertNotNull(obj);
+ TestCase.assertSame
+ (EvolveCase.class, obj.getClass().getSuperclass());
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertEquals(88, obj.ff);
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteSuperclass4_NoFields>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ DeleteSuperclass4_NoFields.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99);
+ index.put((DeleteSuperclass4_NoFields)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj;
+ if (expectEvolved) {
+ obj = readRaw(store, 99, NAME2, 1, CASECLS, 0);
+ } else {
+ obj = readRaw(store, 99, NAME2, 0, NAME, 0, CASECLS, 0);
+ }
+ checkRawFields(obj, "key", 99, "ff", 88);
+ checkRawFields(obj.getSuper());
+ if (expectEvolved) {
+ TestCase.assertNull(obj.getSuper().getSuper());
+ } else {
+ checkRawFields(obj.getSuper().getSuper());
+ }
+ }
+ }
+
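+ /*
+ * Note: unlike DeleteSuperclass3_WithDeleter, this case overrides no
+ * getMutations(); removing a superclass that declared no persistent
+ * fields is treated as a compatible change, so no Deleter is required.
+ */
+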
+ @Persistent(version=1)
+ static class DeleteSuperclass5_Embedded {
+
+ int f;
+
+ @Override
+ public String toString() {
+ return "" + f;
+ }
+ }
+
+ /**
+ * Ensure that a superclass at the top of an embedded class's hierarchy
+ * can be deleted. A class Deleter is used for the deleted base class.
+ */
+ @Entity
+ static class DeleteSuperclass5_Top
+ extends EvolveCase {
+
+ private static final String NAME =
+ DeleteSuperclass5_Top.class.getName();
+ private static final String NAME2 =
+ DeleteSuperclass5_Embedded.class.getName();
+ private static final String NAME3 =
+ PREFIX + "DeleteSuperclass5_Embedded_Base";
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff;
+
+ DeleteSuperclass5_Embedded embed =
+ new DeleteSuperclass5_Embedded();
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addDeleter(new Deleter(NAME3, 0));
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkNonEntity(true, model, env, NAME2, 1);
+ checkNonEntity(false, model, env, NAME3, 0);
+ checkVersions(model, NAME, 0);
+ if (oldTypesExist) {
+ checkVersions(model, NAME2, 1, NAME2, 0);
+ checkVersions(model, NAME3, 0);
+ } else {
+ checkVersions(model, NAME2, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteSuperclass5_Top>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeleteSuperclass5_Top.class);
+ DeleteSuperclass5_Top obj = index.get(key);
+ TestCase.assertNotNull(obj);
+ TestCase.assertNotNull(obj.embed);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertEquals(88, obj.ff);
+ TestCase.assertEquals(123, obj.embed.f);
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteSuperclass5_Top>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ DeleteSuperclass5_Top.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((DeleteSuperclass5_Top)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawType embedType = store.getModel().getRawType(NAME2);
+ RawObject embedSuper = null;
+ if (!expectEvolved) {
+ RawType embedSuperType = store.getModel().getRawType(NAME3);
+ embedSuper = new RawObject
+ (embedSuperType, makeValues("g", 456), null);
+ }
+ RawObject embed =
+ new RawObject(embedType, makeValues("f", 123), embedSuper);
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", 88, "embed", embed);
+ }
+ }
+
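+ /*
+ * Note: NAME3 is assembled from the PREFIX string rather than a class
+ * literal because the deleted superclass of the embedded type no longer
+ * exists in the evolved source; the Deleter must therefore name it by its
+ * stored class name and version (0).
+ */
+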
+ @Persistent
+ static class InsertSuperclass1_BaseClass
+ extends EvolveCase {
+
+ int f = 123;
+ }
+
+ /**
+ * Allow inserting a superclass between two existing classes in the
+ * hierarchy. No mutations are needed.
+ */
+ @Entity(version=1)
+ static class InsertSuperclass1_Between
+ extends InsertSuperclass1_BaseClass {
+
+ private static final String NAME =
+ InsertSuperclass1_BaseClass.class.getName();
+ private static final String NAME2 =
+ InsertSuperclass1_Between.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff;
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkNonEntity(true, model, env, NAME, 0);
+ checkEntity(true, model, env, NAME2, 1, null);
+ checkVersions(model, NAME, 0);
+ if (oldTypesExist) {
+ checkVersions(model, NAME2, 1, NAME2, 0);
+ } else {
+ checkVersions(model, NAME2, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,InsertSuperclass1_Between>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ InsertSuperclass1_Between.class);
+ InsertSuperclass1_Between obj = index.get(key);
+ TestCase.assertNotNull(obj);
+ TestCase.assertSame
+ (InsertSuperclass1_BaseClass.class,
+ obj.getClass().getSuperclass());
+ TestCase.assertSame
+ (EvolveCase.class,
+ obj.getClass().getSuperclass().getSuperclass());
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertEquals(88, obj.ff);
+ TestCase.assertEquals(123, obj.f);
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,InsertSuperclass1_Between>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ InsertSuperclass1_Between.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99);
+ index.put((InsertSuperclass1_Between)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj;
+ if (expectEvolved) {
+ obj = readRaw(store, 99, NAME2, 1, NAME, 0, CASECLS, 0);
+ } else {
+ obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+ }
+ checkRawFields(obj, "key", 99, "ff", 88);
+ if (expectEvolved) {
+ if (expectUpdated) {
+ checkRawFields(obj.getSuper(), "f", 123);
+ } else {
+ checkRawFields(obj.getSuper());
+ }
+ checkRawFields(obj.getSuper().getSuper());
+ TestCase.assertNull(obj.getSuper().getSuper().getSuper());
+ } else {
+ checkRawFields(obj.getSuper());
+ TestCase.assertNull(obj.getSuper().getSuper());
+ }
+ }
+ }
+
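+ /*
+ * Note: inserting a superclass needs no mutations.  As readRawObjects
+ * above shows, an unevolved record has no level for the new superclass at
+ * all; after evolution the level exists, but its field "f" (default 123)
+ * is only written once the record has been updated and stored again.
+ */
+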
+ @Persistent
+ static class InsertSuperclass2_Embedded_Base {
+
+ int g = 456;
+ }
+
+ @Persistent(version=1)
+ static class InsertSuperclass2_Embedded
+ extends InsertSuperclass2_Embedded_Base {
+
+ int f;
+ }
+
+ /**
+ * Allow inserting a superclass at the top of the hierarchy. No mutations
+ * are needed.
+ */
+ @Entity
+ static class InsertSuperclass2_Top
+ extends EvolveCase {
+
+ private static final String NAME =
+ InsertSuperclass2_Top.class.getName();
+ private static final String NAME2 =
+ InsertSuperclass2_Embedded.class.getName();
+ private static final String NAME3 =
+ InsertSuperclass2_Embedded_Base.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff;
+
+ InsertSuperclass2_Embedded embed =
+ new InsertSuperclass2_Embedded();
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkNonEntity(true, model, env, NAME2, 1);
+ checkNonEntity(true, model, env, NAME3, 0);
+ checkVersions(model, NAME, 0);
+ if (oldTypesExist) {
+ checkVersions(model, NAME2, 1, NAME2, 0);
+ } else {
+ checkVersions(model, NAME2, 1);
+ }
+ checkVersions(model, NAME3, 0);
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,InsertSuperclass2_Top>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ InsertSuperclass2_Top.class);
+ InsertSuperclass2_Top obj = index.get(key);
+ TestCase.assertNotNull(obj);
+ TestCase.assertNotNull(obj.embed);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertEquals(88, obj.ff);
+ TestCase.assertEquals(123, obj.embed.f);
+ TestCase.assertEquals(456, obj.embed.g);
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,InsertSuperclass2_Top>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ InsertSuperclass2_Top.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((InsertSuperclass2_Top)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawType embedType = store.getModel().getRawType(NAME2);
+ RawObject embedSuper = null;
+ if (expectEvolved) {
+ RawType embedSuperType = store.getModel().getRawType(NAME3);
+ Map<String,Object> values =
+ expectUpdated ? makeValues("g", 456) : makeValues();
+ embedSuper = new RawObject(embedSuperType, values, null);
+ }
+ RawObject embed =
+ new RawObject(embedType, makeValues("f", 123), embedSuper);
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", 88, "embed", embed);
+ }
+ }
+
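+ /*
+ * Note: the same applies to embedded (non-entity) types; the inserted
+ * InsertSuperclass2_Embedded_Base level carries its field "g" (default
+ * 456) only after the containing entity has been rewritten, as the
+ * expectUpdated branch of readRawObjects above verifies.
+ */
+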
+ @Entity(version=1)
+ static class DisallowNonKeyField_PrimitiveToObject
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_PrimitiveToObject.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ String ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_PrimitiveToObject version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_PrimitiveToObject version: 1 Error: Old field type: int is not compatible with the new type: java.lang.String for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", 88);
+ }
+ }
+
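+ /*
+ * The DisallowNonKeyField_* cases that follow all expect the store open to
+ * fail with IncompatibleClassException because no mutation was supplied
+ * for an incompatible field type change.  A sketch of the usual remedy,
+ * deliberately omitted by these tests, is a Converter mutation (shown here
+ * for the int-to-String case; a named Conversion class with a proper
+ * equals() would normally be used instead of an anonymous one):
+ *
+ *     Mutations m = new Mutations();
+ *     m.addConverter(new Converter(
+ *         DisallowNonKeyField_PrimitiveToObject.class.getName(), 0, "ff",
+ *         new Conversion() {
+ *             public void initialize(EntityModel model) {}
+ *             public Object convert(Object fromValue) {
+ *                 return String.valueOf(fromValue);  // int -> String
+ *             }
+ *             @Override
+ *             public boolean equals(Object o) {
+ *                 return o != null && o.getClass() == getClass();
+ *             }
+ *         }));
+ */
+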
+ @Entity(version=1)
+ static class DisallowNonKeyField_ObjectToPrimitive
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_ObjectToPrimitive.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToPrimitive version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToPrimitive version: 1 Error: Old field type: java.lang.String is not compatible with the new type: int for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", "88");
+ }
+ }
+
+ @Persistent
+ static class MyType {
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof MyType;
+ }
+ }
+
+ @Persistent
+ static class MySubtype extends MyType {
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof MySubtype;
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_ObjectToSubtype
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_ObjectToSubtype.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ MySubtype ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToSubtype version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToSubtype version: 1 Error: Old field type: com.sleepycat.persist.test.EvolveClasses$MyType is not compatible with the new type: com.sleepycat.persist.test.EvolveClasses$MySubtype for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawType embedType = store.getModel().getRawType
+ (MyType.class.getName());
+ RawObject embed = new RawObject(embedType, makeValues(), null);
+
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", embed);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_ObjectToUnrelatedSimple
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_ObjectToUnrelatedSimple.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ String ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToUnrelatedSimple version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToUnrelatedSimple version: 1 Error: Old field type: java.lang.Integer is not compatible with the new type: java.lang.String for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_ObjectToUnrelatedOther
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_ObjectToUnrelatedOther.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ MyType ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToUnrelatedOther version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToUnrelatedOther version: 1 Error: Old field type: java.lang.Integer is not compatible with the new type: com.sleepycat.persist.test.EvolveClasses$MyType for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_byte2boolean
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_byte2boolean.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ boolean ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_byte2boolean version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_byte2boolean version: 1 Error: Old field type: byte is not compatible with the new type: boolean for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (byte) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_short2byte
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_short2byte.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ byte ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_short2byte version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_short2byte version: 1 Error: Old field type: short is not compatible with the new type: byte for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (short) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_int2short
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_int2short.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ short ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_int2short version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_int2short version: 1 Error: Old field type: int is not compatible with the new type: short for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_long2int
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_long2int.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_long2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_long2int version: 1 Error: Old field type: long is not compatible with the new type: int for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (long) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_float2long
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_float2long.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ long ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_float2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_float2long version: 1 Error: Old field type: float is not compatible with the new type: long for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (float) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_double2float
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_double2float.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ float ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_double2float version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_double2float version: 1 Error: Old field type: double is not compatible with the new type: float for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (double) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_Byte2byte
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_Byte2byte.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ byte ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Byte2byte version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Byte2byte version: 1 Error: Old field type: java.lang.Byte is not compatible with the new type: byte for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (byte) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_Character2char
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_Character2char.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ char ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Character2char version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Character2char version: 1 Error: Old field type: java.lang.Character is not compatible with the new type: char for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (char) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_Short2short
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_Short2short.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ short ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Short2short version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Short2short version: 1 Error: Old field type: java.lang.Short is not compatible with the new type: short for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (short) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_Integer2int
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_Integer2int.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Integer2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Integer2int version: 1 Error: Old field type: java.lang.Integer is not compatible with the new type: int for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_Long2long
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_Long2long.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ long ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Long2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Long2long version: 1 Error: Old field type: java.lang.Long is not compatible with the new type: long for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (long) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_Float2float
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_Float2float.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ float ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Float2float version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Float2float version: 1 Error: Old field type: java.lang.Float is not compatible with the new type: float for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (float) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_Double2double
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_Double2double.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ double ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Double2double version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Double2double version: 1 Error: Old field type: java.lang.Double is not compatible with the new type: double for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (double) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_float2BigInt
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_float2BigInt.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ BigInteger ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_float2BigInt version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_float2BigInt version: 1 Error: Old field type: float is not compatible with the new type: java.math.BigInteger for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (float) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowNonKeyField_BigInt2long
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowNonKeyField_BigInt2long.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ long ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_BigInt2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_BigInt2long version: 1 Error: Old field type: java.math.BigInteger is not compatible with the new type: long for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", BigInteger.valueOf(88));
+ }
+ }
+
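+ /*
+ * The DisallowSecKeyField_* cases below use widening changes (byte to
+ * short, char to int, int to long, and so on) that class evolution would
+ * ordinarily accept for plain non-key fields; as the expected messages
+ * show, they are still rejected when the field is a secondary key.
+ */
+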
+ @Entity(version=1)
+ static class DisallowSecKeyField_byte2short
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowSecKeyField_byte2short.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ short ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_byte2short version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_byte2short version: 1 Error: Old field type: byte is not compatible with the new type: short for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "ff");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (byte) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowSecKeyField_char2int
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowSecKeyField_char2int.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_char2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_char2int version: 1 Error: Old field type: char is not compatible with the new type: int for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "ff");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (char) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowSecKeyField_short2int
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowSecKeyField_short2int.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_short2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_short2int version: 1 Error: Old field type: short is not compatible with the new type: int for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "ff");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (short) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowSecKeyField_int2long
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowSecKeyField_int2long.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ long ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_int2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_int2long version: 1 Error: Old field type: int is not compatible with the new type: long for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "ff");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowSecKeyField_long2float
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowSecKeyField_long2float.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ float ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_long2float version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_long2float version: 1 Error: Old field type: long is not compatible with the new type: float for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "ff");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (long) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowSecKeyField_float2double
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowSecKeyField_float2double.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ double ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_float2double version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_float2double version: 1 Error: Old field type: float is not compatible with the new type: double for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "ff");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (float) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowSecKeyField_Byte2short2
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowSecKeyField_Byte2short2.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ short ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Byte2short2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Byte2short2 version: 1 Error: Old field type: java.lang.Byte is not compatible with the new type: short for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "ff");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (byte) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowSecKeyField_Character2int
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowSecKeyField_Character2int.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Character2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Character2int version: 1 Error: Old field type: java.lang.Character is not compatible with the new type: int for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "ff");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (char) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowSecKeyField_Short2int2
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowSecKeyField_Short2int2.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Short2int2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Short2int2 version: 1 Error: Old field type: java.lang.Short is not compatible with the new type: int for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "ff");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (short) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowSecKeyField_Integer2long
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowSecKeyField_Integer2long.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ long ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Integer2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Integer2long version: 1 Error: Old field type: java.lang.Integer is not compatible with the new type: long for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "ff");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowSecKeyField_Long2float2
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowSecKeyField_Long2float2.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ float ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Long2float2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Long2float2 version: 1 Error: Old field type: java.lang.Long is not compatible with the new type: float for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "ff");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (long) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowSecKeyField_Float2double2
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowSecKeyField_Float2double2.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ double ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Float2double2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Float2double2 version: 1 Error: Old field type: java.lang.Float is not compatible with the new type: double for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "ff");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", (float) 88);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowSecKeyField_int2BigInt
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowSecKeyField_int2BigInt.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ BigInteger ff;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_int2BigInt version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_int2BigInt version: 1 Error: Old field type: int is not compatible with the new type: java.math.BigInteger for field: ff";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, "ff");
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "ff", 88);
+ }
+ }
+
+ // ---
+
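+ /*
+ * The DisallowPriKeyField_* cases below differ from the groups above: the
+ * expected message states that the type of a primary key field (or of a
+ * composite key class field) may not be changed at all, rather than that
+ * a mutation is missing, suggesting no mutation can make these changes
+ * legal.
+ */
+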
+ @Entity(version=1)
+ static class DisallowPriKeyField_byte2short
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowPriKeyField_byte2short.class.getName();
+
+ @PrimaryKey
+ short key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_byte2short version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_byte2short version: 1 Error: Old field type: byte is not compatible with the new type: short for field: key";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, (byte) 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", (byte) 99);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowPriKeyField_char2int
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowPriKeyField_char2int.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_char2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_char2int version: 1 Error: Old field type: char is not compatible with the new type: int for field: key";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, (char) 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", (char) 99);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowPriKeyField_short2int
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowPriKeyField_short2int.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_short2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_short2int version: 1 Error: Old field type: short is not compatible with the new type: int for field: key";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, (short) 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", (short) 99);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowPriKeyField_int2long
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowPriKeyField_int2long.class.getName();
+
+ @PrimaryKey
+ long key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_int2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_int2long version: 1 Error: Old field type: int is not compatible with the new type: long for field: key";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowPriKeyField_long2float
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowPriKeyField_long2float.class.getName();
+
+ @PrimaryKey
+ float key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_long2float version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_long2float version: 1 Error: Old field type: long is not compatible with the new type: float for field: key";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, (long) 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", (long) 99);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowPriKeyField_float2double
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowPriKeyField_float2double.class.getName();
+
+ @PrimaryKey
+ double key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_float2double version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_float2double version: 1 Error: Old field type: float is not compatible with the new type: double for field: key";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, (float) 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", (float) 99);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowPriKeyField_Byte2short2
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowPriKeyField_Byte2short2.class.getName();
+
+ @PrimaryKey
+ short key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Byte2short2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Byte2short2 version: 1 Error: Old field type: java.lang.Byte is not compatible with the new type: short for field: key";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, (byte) 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", (byte) 99);
+ }
+ }
+
+ @Entity(version=1)
+ static class DisallowPriKeyField_Character2int
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowPriKeyField_Character2int.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Character2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Character2int version: 1 Error: Old field type: java.lang.Character is not compatible with the new type: int for field: key";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, (char) 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", (char) 99);
+ }
+ }
+
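+ /** Disallow primary key field type change: Short to int. */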
+ @Entity(version=1)
+ static class DisallowPriKeyField_Short2int2
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowPriKeyField_Short2int2.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Short2int2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Short2int2 version: 1 Error: Old field type: java.lang.Short is not compatible with the new type: int for field: key";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, (short) 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", (short) 99);
+ }
+ }
+
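+ /** Disallow primary key field type change: Integer to long. */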
+ @Entity(version=1)
+ static class DisallowPriKeyField_Integer2long
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowPriKeyField_Integer2long.class.getName();
+
+ @PrimaryKey
+ long key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Integer2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Integer2long version: 1 Error: Old field type: java.lang.Integer is not compatible with the new type: long for field: key";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99);
+ }
+ }
+
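+ /** Disallow primary key field type change: Long to float. */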
+ @Entity(version=1)
+ static class DisallowPriKeyField_Long2float2
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowPriKeyField_Long2float2.class.getName();
+
+ @PrimaryKey
+ float key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Long2float2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Long2float2 version: 1 Error: Old field type: java.lang.Long is not compatible with the new type: float for field: key";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, (long) 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", (long) 99);
+ }
+ }
+
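+ /** Disallow primary key field type change: Float to double. */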
+ @Entity(version=1)
+ static class DisallowPriKeyField_Float2double2
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowPriKeyField_Float2double2.class.getName();
+
+ @PrimaryKey
+ double key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Float2double2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Float2double2 version: 1 Error: Old field type: java.lang.Float is not compatible with the new type: double for field: key";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, (float) 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", (float) 99);
+ }
+ }
+
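+ /** Disallow primary key field type change: Long to BigInteger. */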
+ @Entity(version=1)
+ static class DisallowPriKeyField_Long2BigInt
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowPriKeyField_Long2BigInt.class.getName();
+
+ @PrimaryKey
+ BigInteger key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Long2BigInt version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Long2BigInt version: 1 Error: Old field type: java.lang.Long is not compatible with the new type: java.math.BigInteger for field: key";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawObject obj = readRaw(store, 99L, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99L);
+ }
+ }
+
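+ /**
+ * Disallow changing the type of a composite key class field (f2: byte to
+ * short); opening the store must fail with IncompatibleClassException.
+ */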
+ @Persistent(version=1)
+ static class DisallowCompositeKeyField_byte2short_Key {
+
+ @KeyField(1)
+ int f1 = 1;
+
+ @KeyField(2)
+ short f2 = 2;
+
+ @KeyField(3)
+ String f3 = "3";
+ }
+
+ @Entity
+ static class DisallowCompositeKeyField_byte2short
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowCompositeKeyField_byte2short.class.getName();
+ private static final String NAME2 =
+ DisallowCompositeKeyField_byte2short_Key.class.getName();
+
+ @PrimaryKey
+ DisallowCompositeKeyField_byte2short_Key key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowCompositeKeyField_byte2short_Key version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowCompositeKeyField_byte2short_Key version: 1 Error: Old field type: byte is not compatible with the new type: short for field: f2";
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkNonEntity(true, model, env, NAME2, 0);
+ checkVersions(model, NAME, 0);
+ checkVersions(model, NAME2, 0);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ if (expectEvolved) {
+ TestCase.fail();
+ }
+ RawType rawKeyType = store.getModel().getRawType(NAME2);
+ RawObject rawKey = new RawObject
+ (rawKeyType,
+ makeValues("f1", 1, "f2", (byte) 2, "f3", "3"),
+ null);
+
+ RawObject obj = readRaw(store, rawKey, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", rawKey);
+ }
+ }
+
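+ /**
+ * Allow changing the primary key field from the primitive byte to the
+ * wrapper Byte; existing records remain readable under the new key class.
+ */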
+ @Entity(version=1)
+ static class AllowPriKeyField_byte2Byte
+ extends EvolveCase {
+
+ private static final String NAME =
+ AllowPriKeyField_byte2Byte.class.getName();
+
+ @PrimaryKey
+ Byte key = 99;
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Byte,AllowPriKeyField_byte2Byte>
+ index = store.getPrimaryIndex
+ (Byte.class,
+ AllowPriKeyField_byte2Byte.class);
+ AllowPriKeyField_byte2Byte obj = index.get(key);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(Byte.valueOf((byte) 99), obj.key);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Byte,AllowPriKeyField_byte2Byte>
+ index = newStore.getPrimaryIndex
+ (Byte.class,
+ AllowPriKeyField_byte2Byte.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get((byte) 99);
+ index.put((AllowPriKeyField_byte2Byte)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj;
+ if (expectEvolved) {
+ obj = readRaw(store, (byte) 99, NAME, 1, CASECLS, 0);
+ } else {
+ obj = readRaw(store, (byte) 99, NAME, 0, CASECLS, 0);
+ }
+ checkRawFields(obj, "key", (byte) 99);
+ }
+ }
+
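+ /**
+ * Allow changing the primary key field from the wrapper Byte back to the
+ * primitive byte.
+ */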
+ @Entity(version=1)
+ static class AllowPriKeyField_Byte2byte2
+ extends EvolveCase {
+
+ private static final String NAME =
+ AllowPriKeyField_Byte2byte2.class.getName();
+
+ @PrimaryKey
+ byte key = 99;
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Byte,AllowPriKeyField_Byte2byte2>
+ index = store.getPrimaryIndex
+ (Byte.class,
+ AllowPriKeyField_Byte2byte2.class);
+ AllowPriKeyField_Byte2byte2 obj = index.get(key);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals((byte) 99, obj.key);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Byte,AllowPriKeyField_Byte2byte2>
+ index = newStore.getPrimaryIndex
+ (Byte.class,
+ AllowPriKeyField_Byte2byte2.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get((byte) 99);
+ index.put((AllowPriKeyField_Byte2byte2)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj;
+ if (expectEvolved) {
+ obj = readRaw(store, (byte) 99, NAME, 1, CASECLS, 0);
+ } else {
+ obj = readRaw(store, (byte) 99, NAME, 0, CASECLS, 0);
+ }
+ checkRawFields(obj, "key", (byte) 99);
+ }
+ }
+
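+ /**
+ * Composite key class covering all primitive and wrapper key field types;
+ * used as the kComposite secondary key of AllowFieldTypeChanges_Base.
+ */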
+ @Persistent(version=1)
+ static class AllowFieldTypeChanges_Key {
+
+ AllowFieldTypeChanges_Key() {
+ this(false);
+ }
+
+ AllowFieldTypeChanges_Key(boolean init) {
+ if (init) {
+ f1 = true;
+ f2 = (byte) 2;
+ f3 = (short) 3;
+ f4 = 4;
+ f5 = 5L;
+ f6 = 6F;
+ f7 = 7D;
+ f8 = (char) 8;
+ f9 = true;
+ f10 = (byte) 10;
+ f11 = (short) 11;
+ f12 = 12;
+ f13 = 13L;
+ f14 = 14F;
+ f15 = 15D;
+ f16 = (char) 16;
+ }
+ }
+
+ @KeyField(1)
+ boolean f1;
+
+ @KeyField(2)
+ byte f2;
+
+ @KeyField(3)
+ short f3;
+
+ @KeyField(4)
+ int f4;
+
+ @KeyField(5)
+ long f5;
+
+ @KeyField(6)
+ float f6;
+
+ @KeyField(7)
+ double f7;
+
+ @KeyField(8)
+ char f8;
+
+ @KeyField(9)
+ Boolean f9;
+
+ @KeyField(10)
+ Byte f10;
+
+ @KeyField(11)
+ Short f11;
+
+ @KeyField(12)
+ Integer f12;
+
+ @KeyField(13)
+ Long f13;
+
+ @KeyField(14)
+ Float f14;
+
+ @KeyField(15)
+ Double f15;
+
+ @KeyField(16)
+ Character f16;
+ }
+
+ @Persistent(version=1)
+ static class AllowFieldTypeChanges_Base
+ extends EvolveCase {
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ AllowFieldTypeChanges_Key kComposite;
+
+ Integer f_long2Integer;
+ Long f_String2Long;
+ }
+
+ /**
+ * Allow field type changes: automatic widening, supported widening,
+ * and Converter mutations. Also tests primary and secondary key field
+ * renaming.
+ */
+ @Entity(version=1)
+ static class AllowFieldTypeChanges
+ extends AllowFieldTypeChanges_Base {
+
+ private static final String NAME =
+ AllowFieldTypeChanges.class.getName();
+ private static final String NAME2 =
+ AllowFieldTypeChanges_Base.class.getName();
+ private static final String NAME3 =
+ AllowFieldTypeChanges_Key.class.getName();
+
+ @PrimaryKey
+ Integer pkeyInteger;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Boolean kBoolean;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Byte kByte;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Short kShort;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Integer kInteger;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Long kLong;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Float kFloat;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Double kDouble;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Character kCharacter;
+
+ short f01;
+ int f02;
+ long f03;
+ float f04;
+ double f06;
+ int f07;
+ long f08;
+ float f09;
+ double f10;
+ int f11;
+ long f12;
+ float f13;
+ double f14;
+ long f15;
+ float f16;
+ double f17;
+ float f18;
+ double f19;
+ double f20;
+
+ Short f21;
+ Integer f22;
+ Long f23;
+ Float f24;
+ Double f26;
+ Integer f27;
+ Long f28;
+ Float f29;
+ Double f30;
+ Integer f31;
+ Long f32;
+ Float f33;
+ Double f34;
+ Long f35;
+ Float f36;
+ Double f37;
+ Float f38;
+ Double f39;
+ Double f40;
+
+ Short f41;
+ Integer f42;
+ Long f43;
+ Float f44;
+ Double f46;
+ Integer f47;
+ Long f48;
+ Float f49;
+ Double f50;
+ Integer f51;
+ Long f52;
+ Float f53;
+ Double f54;
+ Long f55;
+ Float f56;
+ Double f57;
+ Float f58;
+ Double f59;
+ Double f60;
+
+ BigInteger f70;
+ BigInteger f71;
+ BigInteger f72;
+ BigInteger f73;
+ BigInteger f74;
+ BigInteger f75;
+ BigInteger f76;
+ BigInteger f77;
+ BigInteger f78;
+ BigInteger f79;
+
+ int f_long2int;
+ long f_String2long;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addRenamer(new Renamer(NAME, 0, "pkeyint", "pkeyInteger"));
+ m.addRenamer(new Renamer(NAME, 0, "kboolean", "kBoolean"));
+ m.addRenamer(new Renamer(NAME, 0, "kbyte", "kByte"));
+ m.addRenamer(new Renamer(NAME, 0, "kshort", "kShort"));
+ m.addRenamer(new Renamer(NAME, 0, "kint", "kInteger"));
+ m.addRenamer(new Renamer(NAME, 0, "klong", "kLong"));
+ m.addRenamer(new Renamer(NAME, 0, "kfloat", "kFloat"));
+ m.addRenamer(new Renamer(NAME, 0, "kdouble", "kDouble"));
+ m.addRenamer(new Renamer(NAME, 0, "kchar", "kCharacter"));
+ m.addRenamer(new Renamer(NAME2, 0, "kcomposite", "kComposite"));
+
+ Conversion conv1 = new MyConversion1();
+ Conversion conv2 = new MyConversion2();
+
+ m.addConverter(new Converter(NAME, 0, "f_long2int", conv1));
+ m.addConverter(new Converter(NAME, 0, "f_String2long", conv2));
+ m.addConverter(new Converter(NAME2, 0, "f_long2Integer", conv1));
+ m.addConverter(new Converter(NAME2, 0, "f_String2Long", conv2));
+ return m;
+ }
+
+ @SuppressWarnings("serial")
+ static class MyConversion1 implements Conversion {
+
+ public void initialize(EntityModel model) {}
+
+ public Object convert(Object o) {
+ return ((Long) o).intValue();
+ }
+
+ @Override
+ public boolean equals(Object other) { return true; }
+ }
+
+ @SuppressWarnings("serial")
+ static class MyConversion2 implements Conversion {
+
+ public void initialize(EntityModel model) {}
+
+ public Object convert(Object o) {
+ return Long.valueOf((String) o);
+ }
+
+ @Override
+ public boolean equals(Object other) { return true; }
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ checkNonEntity(true, model, env, NAME2, 1);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ checkVersions(model, NAME2, 1, NAME2, 0);
+ checkVersions(model, NAME3, 1, NAME3, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ checkVersions(model, NAME2, 1);
+ checkVersions(model, NAME3, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AllowFieldTypeChanges>
+ index = store.getPrimaryIndex
+ (Integer.class, AllowFieldTypeChanges.class);
+ AllowFieldTypeChanges obj = index.get(99);
+ checkValues(obj);
+ checkSecondaries(store, index);
+
+ if (doUpdate) {
+ index.put(obj);
+ checkSecondaries(store, index);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AllowFieldTypeChanges>
+ index = newStore.getPrimaryIndex
+ (Integer.class, AllowFieldTypeChanges.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((AllowFieldTypeChanges)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ private void checkSecondaries(EntityStore store,
+ PrimaryIndex<Integer,
+ AllowFieldTypeChanges>
+ index)
+ throws DatabaseException {
+
+ checkValues(store.getSecondaryIndex
+ (index, Boolean.class, "kBoolean").get(true));
+ checkValues(store.getSecondaryIndex
+ (index, Byte.class, "kByte").get((byte) 77));
+ checkValues(store.getSecondaryIndex
+ (index, Short.class, "kShort").get((short) 66));
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "kInteger").get(55));
+ checkValues(store.getSecondaryIndex
+ (index, Long.class, "kLong").get((long) 44));
+ checkValues(store.getSecondaryIndex
+ (index, Float.class, "kFloat").get((float) 33));
+ checkValues(store.getSecondaryIndex
+ (index, Double.class, "kDouble").get((double) 22));
+ checkValues(store.getSecondaryIndex
+ (index, Character.class, "kCharacter").get((char) 11));
+ checkValues(store.getSecondaryIndex
+ (index, AllowFieldTypeChanges_Key.class, "kComposite").get
+ (new AllowFieldTypeChanges_Key(true)));
+ }
+
+ private void checkValues(AllowFieldTypeChanges obj) {
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(obj.pkeyInteger, Integer.valueOf(99));
+ TestCase.assertEquals(obj.kBoolean, Boolean.valueOf(true));
+ TestCase.assertEquals(obj.kByte, Byte.valueOf((byte) 77));
+ TestCase.assertEquals(obj.kShort, Short.valueOf((short) 66));
+ TestCase.assertEquals(obj.kInteger, Integer.valueOf(55));
+ TestCase.assertEquals(obj.kLong, Long.valueOf(44));
+ TestCase.assertEquals(obj.kFloat, Float.valueOf(33));
+ TestCase.assertEquals(obj.kDouble, Double.valueOf(22));
+ TestCase.assertEquals(obj.kCharacter, Character.valueOf((char) 11));
+
+ AllowFieldTypeChanges_Key embed = obj.kComposite;
+ TestCase.assertNotNull(embed);
+ TestCase.assertEquals(embed.f1, true);
+ TestCase.assertEquals(embed.f2, (byte) 2);
+ TestCase.assertEquals(embed.f3, (short) 3);
+ TestCase.assertEquals(embed.f4, 4);
+ TestCase.assertEquals(embed.f5, 5L);
+ TestCase.assertEquals(embed.f6, 6F);
+ TestCase.assertEquals(embed.f7, 7D);
+ TestCase.assertEquals(embed.f8, (char) 8);
+ TestCase.assertEquals(embed.f9, Boolean.valueOf(true));
+ TestCase.assertEquals(embed.f10, Byte.valueOf((byte) 10));
+ TestCase.assertEquals(embed.f11, Short.valueOf((short) 11));
+ TestCase.assertEquals(embed.f12, Integer.valueOf(12));
+ TestCase.assertEquals(embed.f13, Long.valueOf(13L));
+ TestCase.assertEquals(embed.f14, Float.valueOf(14F));
+ TestCase.assertEquals(embed.f15, Double.valueOf(15D));
+ TestCase.assertEquals(embed.f16, Character.valueOf((char) 16));
+
+ TestCase.assertEquals(obj.f01, (short) 1);
+ TestCase.assertEquals(obj.f02, 2);
+ TestCase.assertEquals(obj.f03, 3);
+ TestCase.assertEquals(obj.f04, (float) 4);
+ TestCase.assertEquals(obj.f06, (double) 6);
+ TestCase.assertEquals(obj.f07, 7);
+ TestCase.assertEquals(obj.f08, 8);
+ TestCase.assertEquals(obj.f09, (float) 9);
+ TestCase.assertEquals(obj.f10, (double) 10);
+ TestCase.assertEquals(obj.f11, 11);
+ TestCase.assertEquals(obj.f12, 12);
+ TestCase.assertEquals(obj.f13, (float) 13);
+ TestCase.assertEquals(obj.f14, (double) 14);
+ TestCase.assertEquals(obj.f15, 15L);
+ TestCase.assertEquals(obj.f16, 16F);
+ TestCase.assertEquals(obj.f17, 17D);
+ TestCase.assertEquals(obj.f18, (float) 18);
+ TestCase.assertEquals(obj.f19, (double) 19);
+ TestCase.assertEquals(obj.f20, (double) 20);
+
+ TestCase.assertEquals(obj.f21, Short.valueOf((byte) 21));
+ TestCase.assertEquals(obj.f22, Integer.valueOf((byte) 22));
+ TestCase.assertEquals(obj.f23, Long.valueOf((byte) 23));
+ TestCase.assertEquals(obj.f24, Float.valueOf((byte) 24));
+ TestCase.assertEquals(obj.f26, Double.valueOf((byte) 26));
+ TestCase.assertEquals(obj.f27, Integer.valueOf((short) 27));
+ TestCase.assertEquals(obj.f28, Long.valueOf((short) 28));
+ TestCase.assertEquals(obj.f29, Float.valueOf((short) 29));
+ TestCase.assertEquals(obj.f30, Double.valueOf((short) 30));
+ TestCase.assertEquals(obj.f31, Integer.valueOf((char) 31));
+ TestCase.assertEquals(obj.f32, Long.valueOf((char) 32));
+ TestCase.assertEquals(obj.f33, Float.valueOf((char) 33));
+ TestCase.assertEquals(obj.f34, Double.valueOf((char) 34));
+ TestCase.assertEquals(obj.f35, Long.valueOf(35));
+ TestCase.assertEquals(obj.f36, Float.valueOf(36));
+ TestCase.assertEquals(obj.f37, Double.valueOf(37));
+ TestCase.assertEquals(obj.f38, Float.valueOf(38));
+ TestCase.assertEquals(obj.f39, Double.valueOf(39));
+ TestCase.assertEquals(obj.f40, Double.valueOf(40));
+
+ TestCase.assertEquals(obj.f41, Short.valueOf((byte) 41));
+ TestCase.assertEquals(obj.f42, Integer.valueOf((byte) 42));
+ TestCase.assertEquals(obj.f43, Long.valueOf((byte) 43));
+ TestCase.assertEquals(obj.f44, Float.valueOf((byte) 44));
+ TestCase.assertEquals(obj.f46, Double.valueOf((byte) 46));
+ TestCase.assertEquals(obj.f47, Integer.valueOf((short) 47));
+ TestCase.assertEquals(obj.f48, Long.valueOf((short) 48));
+ TestCase.assertEquals(obj.f49, Float.valueOf((short) 49));
+ TestCase.assertEquals(obj.f50, Double.valueOf((short) 50));
+ TestCase.assertEquals(obj.f51, Integer.valueOf((char) 51));
+ TestCase.assertEquals(obj.f52, Long.valueOf((char) 52));
+ TestCase.assertEquals(obj.f53, Float.valueOf((char) 53));
+ TestCase.assertEquals(obj.f54, Double.valueOf((char) 54));
+ TestCase.assertEquals(obj.f55, Long.valueOf(55));
+ TestCase.assertEquals(obj.f56, Float.valueOf(56));
+ TestCase.assertEquals(obj.f57, Double.valueOf(57));
+ TestCase.assertEquals(obj.f58, Float.valueOf(58));
+ TestCase.assertEquals(obj.f59, Double.valueOf(59));
+ TestCase.assertEquals(obj.f60, Double.valueOf(60));
+
+ TestCase.assertEquals(obj.f70, BigInteger.valueOf(70));
+ TestCase.assertEquals(obj.f71, BigInteger.valueOf(71));
+ TestCase.assertEquals(obj.f72, BigInteger.valueOf(72));
+ TestCase.assertEquals(obj.f73, BigInteger.valueOf(73));
+ TestCase.assertEquals(obj.f74, BigInteger.valueOf(74));
+ TestCase.assertEquals(obj.f75, BigInteger.valueOf(75));
+ TestCase.assertEquals(obj.f76, BigInteger.valueOf(76));
+ TestCase.assertEquals(obj.f77, BigInteger.valueOf(77));
+ TestCase.assertEquals(obj.f78, BigInteger.valueOf(78));
+ TestCase.assertEquals(obj.f79, BigInteger.valueOf(79));
+
+ TestCase.assertEquals(obj.f_long2Integer, Integer.valueOf(111));
+ TestCase.assertEquals(obj.f_String2Long, Long.valueOf(222));
+ TestCase.assertEquals(obj.f_long2int, 333);
+ TestCase.assertEquals(obj.f_String2long, 444L);
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawType embedType = store.getModel().getRawType(NAME3);
+ RawObject embed = new RawObject
+ (embedType,
+ makeValues
+ ("f1", true,
+ "f2", (byte) 2,
+ "f3", (short) 3,
+ "f4", 4,
+ "f5", 5L,
+ "f6", 6F,
+ "f7", 7D,
+ "f8", (char) 8,
+ "f9", true,
+ "f10", (byte) 10,
+ "f11", (short) 11,
+ "f12", 12,
+ "f13", 13L,
+ "f14", 14F,
+ "f15", 15D,
+ "f16", (char) 16),
+ null);
+
+ RawObject obj;
+ if (expectEvolved) {
+ obj = readRaw(store, 99, NAME, 1, NAME2, 1, CASECLS, 0);
+ checkRawFields(obj, "pkeyInteger", 99,
+ "kBoolean", true,
+ "kByte", (byte) 77,
+ "kShort", (short) 66,
+ "kInteger", 55,
+ "kLong", (long) 44,
+ "kFloat", (float) 33,
+ "kDouble", (double) 22,
+ "kCharacter", (char) 11,
+
+ "f01", (short) 1,
+ "f02", 2,
+ "f03", (long) 3,
+ "f04", (float) 4,
+ "f06", (double) 6,
+ "f07", 7,
+ "f08", (long) 8,
+ "f09", (float) 9,
+ "f10", (double) 10,
+ "f11", 11,
+ "f12", (long) 12,
+ "f13", (float) 13,
+ "f14", (double) 14,
+ "f15", 15L,
+ "f16", 16F,
+ "f17", 17D,
+ "f18", (float) 18,
+ "f19", (double) 19,
+ "f20", (double) 20,
+
+ "f21", (short) 21,
+ "f22", 22,
+ "f23", (long) 23,
+ "f24", (float) 24,
+ "f26", (double) 26,
+ "f27", 27,
+ "f28", (long) 28,
+ "f29", (float) 29,
+ "f30", (double) 30,
+ "f31", 31,
+ "f32", (long) 32,
+ "f33", (float) 33,
+ "f34", (double) 34,
+ "f35", 35L,
+ "f36", 36F,
+ "f37", 37D,
+ "f38", (float) 38,
+ "f39", (double) 39,
+ "f40", (double) 40,
+
+ "f41", (short) 41,
+ "f42", 42,
+ "f43", (long) 43,
+ "f44", (float) 44,
+ "f46", (double) 46,
+ "f47", 47,
+ "f48", (long) 48,
+ "f49", (float) 49,
+ "f50", (double) 50,
+ "f51", 51,
+ "f52", (long) 52,
+ "f53", (float) 53,
+ "f54", (double) 54,
+ "f55", 55L,
+ "f56", 56F,
+ "f57", 57D,
+ "f58", (float) 58,
+ "f59", (double) 59,
+ "f60", (double) 60,
+
+ "f70", BigInteger.valueOf(70),
+ "f71", BigInteger.valueOf(71),
+ "f72", BigInteger.valueOf(72),
+ "f73", BigInteger.valueOf(73),
+ "f74", BigInteger.valueOf(74),
+ "f75", BigInteger.valueOf(75),
+ "f76", BigInteger.valueOf(76),
+ "f77", BigInteger.valueOf(77),
+ "f78", BigInteger.valueOf(78),
+ "f79", BigInteger.valueOf(79),
+
+ "f_long2int", 333,
+ "f_String2long", 444L);
+ checkRawFields(obj.getSuper(),
+ "kComposite", embed,
+ "f_long2Integer", 111,
+ "f_String2Long", 222L);
+ } else {
+ obj = readRaw(store, 99, NAME, 0, NAME2, 0, CASECLS, 0);
+ checkRawFields(obj, "pkeyint", 99,
+ "kboolean", true,
+ "kbyte", (byte) 77,
+ "kshort", (short) 66,
+ "kint", 55,
+ "klong", (long) 44,
+ "kfloat", (float) 33,
+ "kdouble", (double) 22,
+ "kchar", (char) 11,
+
+ "f01", (byte) 1,
+ "f02", (byte) 2,
+ "f03", (byte) 3,
+ "f04", (byte) 4,
+ "f06", (byte) 6,
+ "f07", (short) 7,
+ "f08", (short) 8,
+ "f09", (short) 9,
+ "f10", (short) 10,
+ "f11", (char) 11,
+ "f12", (char) 12,
+ "f13", (char) 13,
+ "f14", (char) 14,
+ "f15", 15,
+ "f16", 16,
+ "f17", 17,
+ "f18", (long) 18,
+ "f19", (long) 19,
+ "f20", (float) 20,
+
+ "f21", (byte) 21,
+ "f22", (byte) 22,
+ "f23", (byte) 23,
+ "f24", (byte) 24,
+ "f26", (byte) 26,
+ "f27", (short) 27,
+ "f28", (short) 28,
+ "f29", (short) 29,
+ "f30", (short) 30,
+ "f31", (char) 31,
+ "f32", (char) 32,
+ "f33", (char) 33,
+ "f34", (char) 34,
+ "f35", 35,
+ "f36", 36,
+ "f37", 37,
+ "f38", (long) 38,
+ "f39", (long) 39,
+ "f40", (float) 40,
+
+ "f41", (byte) 41,
+ "f42", (byte) 42,
+ "f43", (byte) 43,
+ "f44", (byte) 44,
+ "f46", (byte) 46,
+ "f47", (short) 47,
+ "f48", (short) 48,
+ "f49", (short) 49,
+ "f50", (short) 50,
+ "f51", (char) 51,
+ "f52", (char) 52,
+ "f53", (char) 53,
+ "f54", (char) 54,
+ "f55", 55,
+ "f56", 56,
+ "f57", 57,
+ "f58", (long) 58,
+ "f59", (long) 59,
+ "f60", (float) 60,
+
+ "f70", (byte) 70,
+ "f71", (short) 71,
+ "f72", (char) 72,
+ "f73", 73,
+ "f74", (long) 74,
+ "f75", (byte) 75,
+ "f76", (short) 76,
+ "f77", (char) 77,
+ "f78", 78,
+ "f79", (long) 79,
+
+ "f_long2int", 333L,
+ "f_String2long", "444");
+
+ checkRawFields(obj.getSuper(),
+ "kcomposite", embed,
+ "f_long2Integer", 111L,
+ "f_String2Long", "222");
+ }
+ Environment env = store.getEnvironment();
+
+ assertDbExists(expectEvolved, env, NAME, "kBoolean");
+ assertDbExists(expectEvolved, env, NAME, "kByte");
+ assertDbExists(expectEvolved, env, NAME, "kShort");
+ assertDbExists(expectEvolved, env, NAME, "kInteger");
+ assertDbExists(expectEvolved, env, NAME, "kLong");
+ assertDbExists(expectEvolved, env, NAME, "kFloat");
+ assertDbExists(expectEvolved, env, NAME, "kDouble");
+ assertDbExists(expectEvolved, env, NAME, "kCharacter");
+ assertDbExists(expectEvolved, env, NAME, "kComposite");
+
+ assertDbExists(!expectEvolved, env, NAME, "kboolean");
+ assertDbExists(!expectEvolved, env, NAME, "kbyte");
+ assertDbExists(!expectEvolved, env, NAME, "kshort");
+ assertDbExists(!expectEvolved, env, NAME, "kint");
+ assertDbExists(!expectEvolved, env, NAME, "klong");
+ assertDbExists(!expectEvolved, env, NAME, "kfloat");
+ assertDbExists(!expectEvolved, env, NAME, "kdouble");
+ assertDbExists(!expectEvolved, env, NAME, "kchar");
+ assertDbExists(!expectEvolved, env, NAME, "kcomposite");
+ }
+ }
+
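+ /**
+ * Conversion that reverses the characters of a String field value; it is
+ * registered through the Mutations returned by getMutations() below.
+ */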
+ @SuppressWarnings("serial")
+ static class ConvertFieldContent_Conversion implements Conversion {
+
+ public void initialize(EntityModel model) {
+ }
+
+ public Object convert(Object fromValue) {
+ String s1 = (String) fromValue;
+ return (new StringBuilder(s1)).reverse().toString();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof ConvertFieldContent_Conversion;
+ }
+ }
+
+ @Entity(version=1)
+ static class ConvertFieldContent_Entity
+ extends EvolveCase {
+
+ private static final String NAME =
+ ConvertFieldContent_Entity.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ String f1;
+ String f2;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ Converter converter = new Converter
+ (ConvertFieldContent_Entity.class.getName(), 0,
+ "f1", new ConvertFieldContent_Conversion());
+ m.addConverter(converter);
+ converter = new Converter
+ (ConvertFieldContent_Entity.class.getName(), 0,
+ "f2", new ConvertFieldContent_Conversion());
+ m.addConverter(converter);
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertFieldContent_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ ConvertFieldContent_Entity.class);
+ ConvertFieldContent_Entity obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertEquals("43210", obj.f1);
+ TestCase.assertEquals("98765", obj.f2);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertFieldContent_Entity>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ ConvertFieldContent_Entity.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((ConvertFieldContent_Entity)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj =
+ readRaw(store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+ if (expectEvolved) {
+ checkRawFields(obj, "key", 99,
+ "f1", "43210",
+ "f2", "98765");
+ } else {
+ checkRawFields(obj, "key", 99,
+ "f1", "01234",
+ "f2", "56789");
+ }
+ }
+ }
+
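+ /**
+ * Converter example 1: converts the zipCode field of the embedded Address
+ * class from String to int.
+ */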
+ @Persistent(version=1)
+ static class ConvertExample1_Address {
+ String street;
+ String city;
+ String state;
+ int zipCode;
+ }
+
+ @SuppressWarnings("serial")
+ static class ConvertExample1_Conversion implements Conversion {
+
+ public void initialize(EntityModel model) {
+ }
+
+ public Object convert(Object fromValue) {
+ return Integer.valueOf((String) fromValue);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof ConvertExample1_Conversion;
+ }
+ }
+
+ @Entity
+ static class ConvertExample1_Entity
+ extends EvolveCase {
+
+ private static final String NAME =
+ ConvertExample1_Entity.class.getName();
+ private static final String NAME2 =
+ ConvertExample1_Address.class.getName();
+
+ @PrimaryKey
+ int key = 99;
+
+ ConvertExample1_Address embed;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ Converter converter = new Converter
+ (ConvertExample1_Address.class.getName(), 0,
+ "zipCode", new ConvertExample1_Conversion());
+ m.addConverter(converter);
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ if (oldTypesExist) {
+ checkVersions(model, NAME2, 1, NAME2, 0);
+ } else {
+ checkVersions(model, NAME2, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample1_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ ConvertExample1_Entity.class);
+ ConvertExample1_Entity obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertNotNull(obj.embed);
+ TestCase.assertEquals("street", obj.embed.street);
+ TestCase.assertEquals("city", obj.embed.city);
+ TestCase.assertEquals("state", obj.embed.state);
+ TestCase.assertEquals(12345, obj.embed.zipCode);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample1_Entity>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ ConvertExample1_Entity.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((ConvertExample1_Entity)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawType embedType = store.getModel().getRawType(NAME2);
+ RawObject embed;
+ if (expectEvolved) {
+ embed = new RawObject
+ (embedType,
+ makeValues("street", "street",
+ "city", "city",
+ "state", "state",
+ "zipCode", 12345),
+ null);
+ } else {
+ embed = new RawObject
+ (embedType,
+ makeValues("street", "street",
+ "city", "city",
+ "state", "state",
+ "zipCode", "12345"),
+ null);
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "embed", embed);
+ }
+ }
+
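+ /**
+ * Converter example 2: converts an address stored as a single delimited
+ * String into an embedded ConvertExample2_Address object.
+ */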
+ @Persistent
+ static class ConvertExample2_Address {
+ String street;
+ String city;
+ String state;
+ int zipCode;
+ }
+
+ @Entity(version=1)
+ static class ConvertExample2_Person
+ extends EvolveCase {
+
+ private static final String NAME =
+ ConvertExample2_Person.class.getName();
+ private static final String NAME2 =
+ ConvertExample2_Address.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ ConvertExample2_Address address;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ Converter converter = new Converter
+ (ConvertExample2_Person.class.getName(), 0,
+ "address", new ConvertExample2_Conversion());
+ m.addConverter(converter);
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ }
+ checkVersions(model, NAME2, 0);
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample2_Person>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ ConvertExample2_Person.class);
+ ConvertExample2_Person obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertNotNull(obj.address);
+ TestCase.assertEquals("street", obj.address.street);
+ TestCase.assertEquals("city", obj.address.city);
+ TestCase.assertEquals("state", obj.address.state);
+ TestCase.assertEquals(12345, obj.address.zipCode);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample2_Person>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ ConvertExample2_Person.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((ConvertExample2_Person)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ Object embed;
+ if (expectEvolved) {
+ RawType embedType = store.getModel().getRawType(NAME2);
+ embed = new RawObject
+ (embedType,
+ makeValues("street", "street",
+ "city", "city",
+ "state", "state",
+ "zipCode", 12345),
+ null);
+ } else {
+ embed = "street#city#state#12345";
+ }
+ RawObject obj = readRaw
+ (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "address", embed);
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static class ConvertExample2_Conversion implements Conversion {
+ private transient RawType addressType;
+
+ public void initialize(EntityModel model) {
+ addressType = model.getRawType
+ (ConvertExample2_Address.class.getName());
+ }
+
+ public Object convert(Object fromValue) {
+
+ String oldAddress = (String) fromValue;
+ Map<String,Object> addressValues = new HashMap<String,Object>();
+ addressValues.put("street", parseAddress(1, oldAddress));
+ addressValues.put("city", parseAddress(2, oldAddress));
+ addressValues.put("state", parseAddress(3, oldAddress));
+ addressValues.put("zipCode",
+ Integer.valueOf(parseAddress(4, oldAddress)));
+
+ return new RawObject(addressType, addressValues, null);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof ConvertExample2_Conversion;
+ }
+
+ private String parseAddress(int fieldNum, String oldAddress) {
+ StringTokenizer tokens = new StringTokenizer(oldAddress, "#");
+ String field = null;
+ for (int i = 0; i < fieldNum; i += 1) {
+ field = tokens.nextToken();
+ }
+ return field;
+ }
+ }
+
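+ /**
+ * Converter example 3: a whole-class Converter moves the street, city,
+ * state and zipCode fields of Person into a new embedded Address object.
+ */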
+ @Persistent
+ static class ConvertExample3_Address {
+ String street;
+ String city;
+ String state;
+ int zipCode;
+ }
+
+ @SuppressWarnings("serial")
+ static class ConvertExample3_Conversion implements Conversion {
+ private transient RawType newPersonType;
+ private transient RawType addressType;
+
+ public void initialize(EntityModel model) {
+ newPersonType = model.getRawType
+ (ConvertExample3_Person.class.getName());
+ addressType = model.getRawType
+ (ConvertExample3_Address.class.getName());
+ }
+
+ public Object convert(Object fromValue) {
+
+ RawObject person = (RawObject) fromValue;
+ Map<String,Object> personValues = person.getValues();
+ Map<String,Object> addressValues = new HashMap<String,Object>();
+ RawObject address = new RawObject
+ (addressType, addressValues, null);
+
+ addressValues.put("street", personValues.remove("street"));
+ addressValues.put("city", personValues.remove("city"));
+ addressValues.put("state", personValues.remove("state"));
+ addressValues.put("zipCode", personValues.remove("zipCode"));
+ personValues.put("address", address);
+
+ return new RawObject
+ (newPersonType, personValues, person.getSuper());
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof ConvertExample3_Conversion;
+ }
+ }
+
+ @Entity(version=1)
+ static class ConvertExample3_Person
+ extends EvolveCase {
+
+ private static final String NAME =
+ ConvertExample3_Person.class.getName();
+ private static final String NAME2 =
+ ConvertExample3_Address.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ ConvertExample3_Address address;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ Converter converter = new Converter
+ (ConvertExample3_Person.class.getName(), 0,
+ new ConvertExample3_Conversion());
+ m.addConverter(converter);
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ }
+ checkVersions(model, NAME2, 0);
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample3_Person>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ ConvertExample3_Person.class);
+ ConvertExample3_Person obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertNotNull(obj.address);
+ TestCase.assertEquals("street", obj.address.street);
+ TestCase.assertEquals("city", obj.address.city);
+ TestCase.assertEquals("state", obj.address.state);
+ TestCase.assertEquals(12345, obj.address.zipCode);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample3_Person>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ ConvertExample3_Person.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((ConvertExample3_Person)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj = readRaw
+ (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+ if (expectEvolved) {
+ RawType embedType = store.getModel().getRawType(NAME2);
+ Object embed = new RawObject
+ (embedType,
+ makeValues("street", "street",
+ "city", "city",
+ "state", "state",
+ "zipCode", 12345),
+ null);
+ checkRawFields(obj, "key", 99, "address", embed);
+ } else {
+ checkRawFields(obj, "key", 99,
+ "street", "street",
+ "city", "city",
+ "state", "state",
+ "zipCode", 12345);
+ }
+ }
+ }
+
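+ /**
+ * Reverse of converter example 3: a whole-class Converter flattens the
+ * embedded Address back into Person fields and deletes the Address class.
+ */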
+ @SuppressWarnings("serial")
+ static class ConvertExample3Reverse_Conversion implements Conversion {
+ private transient RawType newPersonType;
+
+ public void initialize(EntityModel model) {
+ newPersonType = model.getRawType
+ (ConvertExample3Reverse_Person.class.getName());
+ }
+
+ public Object convert(Object fromValue) {
+
+ RawObject person = (RawObject) fromValue;
+ Map<String,Object> personValues = person.getValues();
+ RawObject address = (RawObject) personValues.remove("address");
+ Map<String,Object> addressValues = address.getValues();
+
+ personValues.put("street", addressValues.remove("street"));
+ personValues.put("city", addressValues.remove("city"));
+ personValues.put("state", addressValues.remove("state"));
+ personValues.put("zipCode", addressValues.remove("zipCode"));
+
+ return new RawObject
+ (newPersonType, personValues, person.getSuper());
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof ConvertExample3Reverse_Conversion;
+ }
+ }
+
+ @Entity(version=1)
+ static class ConvertExample3Reverse_Person
+ extends EvolveCase {
+
+ private static final String NAME =
+ ConvertExample3Reverse_Person.class.getName();
+ private static final String NAME2 =
+ PREFIX + "ConvertExample3Reverse_Address";
+
+ @PrimaryKey
+ int key;
+
+ String street;
+ String city;
+ String state;
+ int zipCode;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ Converter converter = new Converter
+ (ConvertExample3Reverse_Person.class.getName(), 0,
+ new ConvertExample3Reverse_Conversion());
+ m.addConverter(converter);
+ m.addDeleter(new Deleter(NAME2, 0));
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ checkVersions(model, NAME2, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample3Reverse_Person>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ ConvertExample3Reverse_Person.class);
+ ConvertExample3Reverse_Person obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertEquals("street", obj.street);
+ TestCase.assertEquals("city", obj.city);
+ TestCase.assertEquals("state", obj.state);
+ TestCase.assertEquals(12345, obj.zipCode);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample3Reverse_Person>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ ConvertExample3Reverse_Person.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((ConvertExample3Reverse_Person)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj = readRaw
+ (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+ if (expectEvolved) {
+ checkRawFields(obj, "key", 99,
+ "street", "street",
+ "city", "city",
+ "state", "state",
+ "zipCode", 12345);
+ } else {
+ RawType embedType = store.getModel().getRawType(NAME2);
+ Object embed = new RawObject
+ (embedType,
+ makeValues("street", "street",
+ "city", "city",
+ "state", "state",
+ "zipCode", 12345),
+ null);
+ checkRawFields(obj, "key", 99, "address", embed);
+ }
+ }
+ }
+
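+ /**
+ * Converter example 4: moves the name field from subclass ConvertExample4_A
+ * up to its superclass ConvertExample4_B within a class hierarchy.
+ */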
+ @Persistent(version=1)
+ static class ConvertExample4_A extends ConvertExample4_B {
+ }
+
+ @Persistent(version=1)
+ static class ConvertExample4_B {
+ String name;
+ }
+
+ @SuppressWarnings("serial")
+ static class Example4_Conversion implements Conversion {
+ private transient RawType newAType;
+ private transient RawType newBType;
+
+ public void initialize(EntityModel model) {
+ newAType = model.getRawType(ConvertExample4_A.class.getName());
+ newBType = model.getRawType(ConvertExample4_B.class.getName());
+ }
+
+ public Object convert(Object fromValue) {
+ RawObject oldA = (RawObject) fromValue;
+ RawObject oldB = oldA.getSuper();
+ Map<String,Object> aValues = oldA.getValues();
+ Map<String,Object> bValues = oldB.getValues();
+ bValues.put("name", aValues.remove("name"));
+ RawObject newB = new RawObject(newBType, bValues, oldB.getSuper());
+ RawObject newA = new RawObject(newAType, aValues, newB);
+ return newA;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof Example4_Conversion;
+ }
+ }
+
+ @Entity(version=1)
+ static class ConvertExample4_Entity
+ extends EvolveCase {
+
+ private static final String NAME =
+ ConvertExample4_Entity.class.getName();
+ private static final String NAME2 =
+ ConvertExample4_A.class.getName();
+ private static final String NAME3 =
+ ConvertExample4_B.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ ConvertExample4_A embed;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ Converter converter = new Converter
+ (ConvertExample4_A.class.getName(), 0,
+ new Example4_Conversion());
+ m.addConverter(converter);
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ checkVersions(model, NAME2, 1, NAME2, 0);
+ checkVersions(model, NAME3, 1, NAME3, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ checkVersions(model, NAME2, 1);
+ checkVersions(model, NAME3, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample4_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ ConvertExample4_Entity.class);
+ ConvertExample4_Entity obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertNotNull(obj.embed);
+ TestCase.assertEquals("name", obj.embed.name);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample4_Entity>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ ConvertExample4_Entity.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((ConvertExample4_Entity)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawType embedTypeA = store.getModel().getRawType(NAME2);
+ RawType embedTypeB = store.getModel().getRawType(NAME3);
+ Object embed;
+ if (expectEvolved) {
+ embed = new RawObject(embedTypeA, makeValues(),
+ new RawObject
+ (embedTypeB, makeValues("name", "name"), null));
+ } else {
+ embed = new RawObject(embedTypeA, makeValues("name", "name"),
+ new RawObject
+ (embedTypeB, makeValues(), null));
+ }
+ RawObject obj = readRaw
+ (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "embed", embed);
+ }
+ }
+
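+ /**
+ * Converter example 5: splits a single Pet class into Cat and Dog
+ * subclasses based on the old isCatNotDog flag.
+ */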
+ @Persistent(version=1)
+ static class ConvertExample5_Pet {
+ String name;
+ }
+
+ @Persistent
+ static class ConvertExample5_Cat extends ConvertExample5_Pet {
+ int finickyLevel;
+ }
+
+ @Persistent
+ static class ConvertExample5_Dog extends ConvertExample5_Pet {
+ double barkVolume;
+ }
+
+ @SuppressWarnings("serial")
+ static class ConvertExample5_Conversion implements Conversion {
+ private transient RawType newPetType;
+ private transient RawType dogType;
+ private transient RawType catType;
+
+ public void initialize(EntityModel model) {
+ newPetType = model.getRawType(ConvertExample5_Pet.class.getName());
+ dogType = model.getRawType(ConvertExample5_Dog.class.getName());
+ catType = model.getRawType(ConvertExample5_Cat.class.getName());
+ }
+
+ public Object convert(Object fromValue) {
+ RawObject pet = (RawObject) fromValue;
+ Map<String,Object> petValues = pet.getValues();
+ Map<String,Object> subTypeValues = new HashMap<String,Object>();
+ Boolean isCat = (Boolean) petValues.remove("isCatNotDog");
+ Integer finickyLevel = (Integer) petValues.remove("finickyLevel");
+ Double barkVolume = (Double) petValues.remove("barkVolume");
+ RawType newSubType;
+ if (isCat) {
+ newSubType = catType;
+ subTypeValues.put("finickyLevel", finickyLevel);
+ } else {
+ newSubType = dogType;
+ subTypeValues.put("barkVolume", barkVolume);
+ }
+ RawObject newPet = new RawObject
+ (newPetType, petValues, pet.getSuper());
+ return new RawObject(newSubType, subTypeValues, newPet);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof ConvertExample5_Conversion;
+ }
+ }
+
+ @Entity(version=1)
+ static class ConvertExample5_Entity
+ extends EvolveCase {
+
+ private static final String NAME =
+ ConvertExample5_Entity.class.getName();
+ private static final String NAME2 =
+ ConvertExample5_Pet.class.getName();
+ private static final String NAME3 =
+ ConvertExample5_Cat.class.getName();
+ private static final String NAME4 =
+ ConvertExample5_Dog.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ ConvertExample5_Cat cat;
+ ConvertExample5_Dog dog;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ Converter converter = new Converter
+ (ConvertExample5_Pet.class.getName(), 0,
+ new ConvertExample5_Conversion());
+ m.addConverter(converter);
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ checkVersions(model, NAME2, 1, NAME2, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ checkVersions(model, NAME2, 1);
+ }
+ checkVersions(model, NAME3, 0);
+ checkVersions(model, NAME4, 0);
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample5_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ ConvertExample5_Entity.class);
+ ConvertExample5_Entity obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertNotNull(obj.cat);
+ TestCase.assertEquals("Jeffry", obj.cat.name);
+ TestCase.assertEquals(999, obj.cat.finickyLevel);
+ TestCase.assertNotNull(obj.dog);
+ TestCase.assertEquals("Nelson", obj.dog.name);
+ TestCase.assertEquals(0.01, obj.dog.barkVolume);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample5_Entity>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ ConvertExample5_Entity.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((ConvertExample5_Entity)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawType petType = store.getModel().getRawType(NAME2);
+ RawObject cat;
+ RawObject dog;
+ if (expectEvolved) {
+ RawType catType = store.getModel().getRawType(NAME3);
+ RawType dogType = store.getModel().getRawType(NAME4);
+ cat = new RawObject(catType, makeValues("finickyLevel", 999),
+ new RawObject(petType, makeValues("name", "Jeffry"),
+ null));
+ dog = new RawObject(dogType, makeValues("barkVolume", 0.01),
+ new RawObject(petType, makeValues("name", "Nelson"),
+ null));
+ } else {
+ cat = new RawObject(petType, makeValues("name", "Jeffry",
+ "isCatNotDog", true,
+ "finickyLevel", 999,
+ "barkVolume", 0.0),
+ null);
+ dog = new RawObject(petType, makeValues("name", "Nelson",
+ "isCatNotDog", false,
+ "finickyLevel", 0,
+ "barkVolume", 0.01),
+ null);
+ }
+ RawObject obj = readRaw
+ (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "cat", cat, "dog", dog);
+ }
+ }
+
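+ /**
+ * Allow adding and deleting fields in the entity, its base class and an
+ * embedded class: f1, f6 and f7 are removed with Deleter mutations while
+ * new fields are added in version 1.
+ */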
+ @Persistent(version=1)
+ static class AllowFieldAddDelete_Embed {
+ private final String f0 = "0";
+ private String f2;
+ private final int f3 = 3;
+ private String f4;
+ private final int f5 = 5;
+ private final String f8 = "8";
+ private final int f9 = 9;
+ }
+
+ @Persistent(version=1)
+ static class AllowFieldAddDelete_Base
+ extends EvolveCase {
+
+ private final String f0 = "0";
+ private String f2;
+ private final int f3 = 3;
+ private String f4;
+ private final int f5 = 5;
+ private final String f8 = "8";
+ private final int f9 = 9;
+ }
+
+ @Entity(version=1)
+ static class AllowFieldAddDelete
+ extends AllowFieldAddDelete_Base {
+
+ private static final String NAME =
+ AllowFieldAddDelete.class.getName();
+ private static final String NAME2 =
+ AllowFieldAddDelete_Base.class.getName();
+ private static final String NAME3 =
+ AllowFieldAddDelete_Embed.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ AllowFieldAddDelete_Embed embed;
+
+ private final String f0 = "0";
+ private String f2;
+ private final int f3 = 3;
+ private String f4;
+ private final int f5 = 5;
+ private final String f8 = "8";
+ private final int f9 = 9;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ for (String name : new String[] {NAME, NAME2, NAME3}) {
+ m.addDeleter(new Deleter(name, 0, "f1"));
+ m.addDeleter(new Deleter(name, 0, "f6"));
+ m.addDeleter(new Deleter(name, 0, "f7"));
+ }
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ checkVersions(model, NAME2, 1, NAME2, 0);
+ checkVersions(model, NAME3, 1, NAME3, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ checkVersions(model, NAME2, 1);
+ checkVersions(model, NAME3, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AllowFieldAddDelete>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ AllowFieldAddDelete.class);
+ AllowFieldAddDelete obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ {
+ AllowFieldAddDelete o = obj;
+
+ TestCase.assertNotNull(o);
+ TestCase.assertEquals("0", o.f0);
+ TestCase.assertEquals("2", o.f2);
+ TestCase.assertEquals(3, o.f3);
+ TestCase.assertEquals("4", o.f4);
+ TestCase.assertEquals(5, o.f5);
+ TestCase.assertEquals("8", o.f8);
+ TestCase.assertEquals(9, o.f9);
+ }
+ {
+ AllowFieldAddDelete_Base o = obj;
+
+ TestCase.assertNotNull(o);
+ TestCase.assertEquals("0", o.f0);
+ TestCase.assertEquals("2", o.f2);
+ TestCase.assertEquals(3, o.f3);
+ TestCase.assertEquals("4", o.f4);
+ TestCase.assertEquals(5, o.f5);
+ TestCase.assertEquals("8", o.f8);
+ TestCase.assertEquals(9, o.f9);
+ }
+ {
+ AllowFieldAddDelete_Embed o = obj.embed;
+
+ TestCase.assertNotNull(o);
+ TestCase.assertEquals("0", o.f0);
+ TestCase.assertEquals("2", o.f2);
+ TestCase.assertEquals(3, o.f3);
+ TestCase.assertEquals("4", o.f4);
+ TestCase.assertEquals(5, o.f5);
+ TestCase.assertEquals("8", o.f8);
+ TestCase.assertEquals(9, o.f9);
+ }
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AllowFieldAddDelete>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ AllowFieldAddDelete.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((AllowFieldAddDelete)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ static final Object[] fixedFields0 = {
+ "f1", 1,
+ "f2", "2",
+ "f4", "4",
+ "f6", 6,
+ "f7", "7",
+ };
+
+ static final Object[] fixedFields1 = {
+ "f2", "2",
+ "f4", "4",
+ };
+
+ static final Object[] fixedFields2 = {
+ "f0", "0",
+ "f2", "2",
+ "f3", 3,
+ "f4", "4",
+ "f5", 5,
+ "f8", "8",
+ "f9", 9,
+ };
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawType baseType = store.getModel().getRawType(NAME2);
+ RawType embedType = store.getModel().getRawType(NAME3);
+
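+ /*
+ * Expected raw fields: unevolved records hold the version 0 fields
+ * (fixedFields0), evolved but unmodified records hold only the
+ * surviving f2 and f4 (fixedFields1), and rewritten records hold the
+ * full version 1 field set (fixedFields2).
+ */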
+ Object[] ff;
+ if (expectEvolved) {
+ if (expectUpdated) {
+ ff = fixedFields2;
+ } else {
+ ff = fixedFields1;
+ }
+ } else {
+ ff = fixedFields0;
+ }
+ RawObject embed = new RawObject(embedType, makeValues(ff), null);
+ RawObject obj = readRaw
+ (store, 99, NAME, expectEvolved ? 1 : 0,
+ NAME2, expectEvolved ? 1 : 0,
+ CASECLS, 0);
+ checkRaw(obj, ff, "key", 99, "embed", embed);
+ checkRaw(obj.getSuper(), ff);
+ }
+
+ private void checkRaw(RawObject obj,
+ Object[] fixedFields,
+ Object... otherFields) {
+ Object[] allFields =
+ new Object[otherFields.length + fixedFields.length];
+ System.arraycopy(otherFields, 0, allFields, 0, otherFields.length);
+ System.arraycopy(fixedFields, 0, allFields,
+ otherFields.length, fixedFields.length);
+ checkRawFields(obj, allFields);
+ }
+ }
+
+ static class ProxiedClass {
+ int data;
+
+ ProxiedClass(int data) {
+ this.data = data;
+ }
+ }
+
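+ /*
+ * In version 1 the proxy's data field is a long; readRawObjects in
+ * ProxiedClass_Entity expects an int value (88) before evolution and a
+ * long value (88L) afterwards.
+ */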
+ @Persistent(version=1, proxyFor=ProxiedClass.class)
+ static class ProxiedClass_Proxy implements PersistentProxy<ProxiedClass> {
+ long data;
+
+ public void initializeProxy(ProxiedClass o) {
+ data = o.data;
+ }
+
+ public ProxiedClass convertProxy() {
+ return new ProxiedClass((int) data);
+ }
+ }
+
+ @Entity
+ static class ProxiedClass_Entity
+ extends EvolveCase {
+
+ private static final String NAME =
+ ProxiedClass_Entity.class.getName();
+ private static final String NAME2 =
+ ProxiedClass_Proxy.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ ProxiedClass embed;
+
+ @Override
+ void configure(EntityModel model, StoreConfig config) {
+ model.registerClass(ProxiedClass_Proxy.class);
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ if (oldTypesExist) {
+ checkVersions(model, NAME2, 1, NAME2, 0);
+ } else {
+ checkVersions(model, NAME2, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ProxiedClass_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ ProxiedClass_Entity.class);
+ ProxiedClass_Entity obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertNotNull(obj.embed);
+ TestCase.assertEquals(88, obj.embed.data);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ProxiedClass_Entity>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ ProxiedClass_Entity.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((ProxiedClass_Entity)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawType embedType = store.getModel().getRawType(NAME2);
+ RawObject embed;
+ if (expectEvolved) {
+ embed = new RawObject
+ (embedType, makeValues("data", 88L), null);
+ } else {
+ embed = new RawObject
+ (embedType, makeValues("data", 88), null);
+ }
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "embed", embed);
+ }
+ }
+
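+ /*
+ * Moving a proxied type to a different proxy class is disallowed; the
+ * expected exception below reports that the proxy for StringBuffer
+ * changed from DisallowChangeProxyFor_Proxy to
+ * DisallowChangeProxyFor_Proxy2.
+ */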
+ @Persistent(proxyFor=StringBuffer.class)
+ static class DisallowChangeProxyFor_Proxy2
+ implements PersistentProxy<StringBuffer> {
+
+ String data;
+
+ public void initializeProxy(StringBuffer o) {
+ data = o.toString();
+ }
+
+ public StringBuffer convertProxy() {
+ return new StringBuffer(data);
+ }
+ }
+
+ @Persistent(proxyFor=StringBuilder.class)
+ static class DisallowChangeProxyFor_Proxy
+ implements PersistentProxy<StringBuilder> {
+
+ String data;
+
+ public void initializeProxy(StringBuilder o) {
+ data = o.toString();
+ }
+
+ public StringBuilder convertProxy() {
+ return new StringBuilder(data);
+ }
+ }
+
+ @Entity
+ static class DisallowChangeProxyFor
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Error when evolving class: java.lang.StringBuffer version: 0 to class: java.lang.StringBuffer version: 0 Error: The proxy class for this type has been changed from: com.sleepycat.persist.test.EvolveClasses$DisallowChangeProxyFor_Proxy to: com.sleepycat.persist.test.EvolveClasses$DisallowChangeProxyFor_Proxy2";
+ }
+
+ @Override
+ void configure(EntityModel model, StoreConfig config) {
+ model.registerClass(DisallowChangeProxyFor_Proxy.class);
+ model.registerClass(DisallowChangeProxyFor_Proxy2.class);
+ }
+ }
+
+ @Persistent
+ static class DisallowDeleteProxyFor_Proxy {
+ String data;
+ }
+
+ @Entity
+ static class DisallowDeleteProxyFor
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: java.lang.StringBuffer version: 0 Error: java.lang.IllegalArgumentException: Class could not be loaded or is not persistent: java.lang.StringBuffer";
+ }
+ }
+
+ @Persistent(version=1)
+ static class ArrayNameChange_Component_Renamed {
+
+ long data;
+ }
+
+ @Entity
+ static class ArrayNameChange_Entity
+ extends EvolveCase {
+
+ private static final String NAME =
+ ArrayNameChange_Entity.class.getName();
+ private static final String NAME2 =
+ ArrayNameChange_Component_Renamed.class.getName();
+ private static final String NAME3 =
+ PREFIX + "ArrayNameChange_Component";
+
+ @PrimaryKey
+ int key;
+
+ ArrayNameChange_Component_Renamed[] embed;
+ ArrayNameChange_Component_Renamed embed2;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addRenamer(new Renamer(NAME3, 0, NAME2));
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ if (oldTypesExist) {
+ checkVersions(model, NAME2, 1, NAME3, 0);
+ } else {
+ checkVersions(model, NAME2, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ArrayNameChange_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ ArrayNameChange_Entity.class);
+ ArrayNameChange_Entity obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertNotNull(obj.embed);
+ TestCase.assertEquals(1, obj.embed.length);
+ TestCase.assertEquals(88L, obj.embed[0].data);
+ TestCase.assertSame(obj.embed2, obj.embed[0]);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ArrayNameChange_Entity>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ ArrayNameChange_Entity.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((ArrayNameChange_Entity)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
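+ /*
+ * The raw array type name is built from the component class name
+ * ("[L" + name + ";"), so renaming the component class also changes
+ * the array type looked up here.
+ */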
+ String compTypeName = expectEvolved ? NAME2 : NAME3;
+ String arrayTypeName = "[L" + compTypeName + ';';
+ RawType compType = store.getModel().getRawType(compTypeName);
+ RawType arrayType = store.getModel().getRawType(arrayTypeName);
+ RawObject embed2;
+ if (expectEvolved) {
+ embed2 = new RawObject
+ (compType, makeValues("data", 88L), null);
+ } else {
+ embed2 = new RawObject
+ (compType, makeValues("data", 88), null);
+ }
+ RawObject embed = new RawObject
+ (arrayType, new Object[] { embed2 });
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ checkRawFields(obj, "key", 99, "embed", embed, "embed2", embed2);
+ }
+ }
+
+ enum AddEnumConstant_Enum {
+ A, B, C;
+ }
+
+ @Entity(version=1)
+ static class AddEnumConstant_Entity
+ extends EvolveCase {
+
+ private static final String NAME =
+ AddEnumConstant_Entity.class.getName();
+ private static final String NAME2 =
+ AddEnumConstant_Enum.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ AddEnumConstant_Enum e1;
+ AddEnumConstant_Enum e2;
+ AddEnumConstant_Enum e3 = AddEnumConstant_Enum.C;
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ checkVersions(model, NAME2, 0, NAME2, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ checkVersions(model, NAME2, 0);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AddEnumConstant_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ AddEnumConstant_Entity.class);
+ AddEnumConstant_Entity obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertSame(AddEnumConstant_Enum.A, obj.e1);
+ TestCase.assertSame(AddEnumConstant_Enum.B, obj.e2);
+ TestCase.assertSame(AddEnumConstant_Enum.C, obj.e3);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AddEnumConstant_Entity>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ AddEnumConstant_Entity.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((AddEnumConstant_Entity)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj = readRaw
+ (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+ RawType enumType = store.getModel().getRawType(NAME2);
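+ /*
+ * The e3 field (default C) was added in entity version 1, so records
+ * that have not been rewritten contain only e1 and e2.
+ */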
+ if (expectUpdated) {
+ checkRawFields(obj, "key", 99,
+ "e1", new RawObject(enumType, "A"),
+ "e2", new RawObject(enumType, "B"),
+ "e3", new RawObject(enumType, "C"));
+ } else {
+ checkRawFields(obj, "key", 99,
+ "e1", new RawObject(enumType, "A"),
+ "e2", new RawObject(enumType, "B"));
+ }
+ }
+ }
+
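+ /*
+ * Unlike AddEnumConstant above, the constants used by the fields added
+ * in version 1 (X, Y and Z) are interleaved with A and B in the
+ * declaration order, exercising insertion of enum constants rather
+ * than appending them.
+ */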
+ enum InsertEnumConstant_Enum {
+ X, A, Y, B, Z;
+ }
+
+ @Persistent
+ static class InsertEnumConstant_KeyClass
+ implements Comparable<InsertEnumConstant_KeyClass> {
+
+ @KeyField(1)
+ InsertEnumConstant_Enum key;
+
+ private InsertEnumConstant_KeyClass() {}
+
+ InsertEnumConstant_KeyClass(InsertEnumConstant_Enum key) {
+ this.key = key;
+ }
+
+ public int compareTo(InsertEnumConstant_KeyClass o) {
+ /* Use the natural order, in spite of insertions. */
+ return key.compareTo(o.key);
+ }
+ }
+
+ @Entity(version=1)
+ static class InsertEnumConstant_Entity
+ extends EvolveCase {
+
+ private static final String NAME =
+ InsertEnumConstant_Entity.class.getName();
+ private static final String NAME2 =
+ InsertEnumConstant_Enum.class.getName();
+ private static final String NAME3 =
+ InsertEnumConstant_KeyClass.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ InsertEnumConstant_KeyClass secKey;
+
+ InsertEnumConstant_Enum e1;
+ InsertEnumConstant_Enum e2;
+ InsertEnumConstant_Enum e3 = InsertEnumConstant_Enum.X;
+ InsertEnumConstant_Enum e4 = InsertEnumConstant_Enum.Y;
+ InsertEnumConstant_Enum e5 = InsertEnumConstant_Enum.Z;
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ checkVersions(model, NAME2, 0, NAME2, 0);
+ checkVersions(model, NAME3, 0, NAME3, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ checkVersions(model, NAME2, 0);
+ checkVersions(model, NAME3, 0);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,InsertEnumConstant_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ InsertEnumConstant_Entity.class);
+ InsertEnumConstant_Entity obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ if (updated) {
+ TestCase.assertSame(InsertEnumConstant_Enum.X, obj.secKey.key);
+ } else {
+ TestCase.assertSame(InsertEnumConstant_Enum.A, obj.secKey.key);
+ }
+ TestCase.assertSame(InsertEnumConstant_Enum.A, obj.e1);
+ TestCase.assertSame(InsertEnumConstant_Enum.B, obj.e2);
+ TestCase.assertSame(InsertEnumConstant_Enum.X, obj.e3);
+ TestCase.assertSame(InsertEnumConstant_Enum.Y, obj.e4);
+ TestCase.assertSame(InsertEnumConstant_Enum.Z, obj.e5);
+
+ if (doUpdate) {
+ obj.secKey =
+ new InsertEnumConstant_KeyClass(InsertEnumConstant_Enum.X);
+ index.put(obj);
+ updated = true;
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,InsertEnumConstant_Entity>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ InsertEnumConstant_Entity.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((InsertEnumConstant_Entity)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj = readRaw
+ (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+ RawType enumType = store.getModel().getRawType(NAME2);
+
+ Map<String, Object> secKeyFields = new HashMap<String, Object>();
+ RawType secKeyType = store.getModel().getRawType(NAME3);
+ RawObject secKeyObject =
+ new RawObject(secKeyType, secKeyFields, null /*superObject*/);
+
+ if (expectUpdated) {
+ secKeyFields.put("key", new RawObject(enumType, "X"));
+ checkRawFields(obj, "key", 99,
+ "secKey", secKeyObject,
+ "e1", new RawObject(enumType, "A"),
+ "e2", new RawObject(enumType, "B"),
+ "e3", new RawObject(enumType, "X"),
+ "e4", new RawObject(enumType, "Y"),
+ "e5", new RawObject(enumType, "Z"));
+ } else {
+ secKeyFields.put("key", new RawObject(enumType, "A"));
+ checkRawFields(obj, "key", 99,
+ "secKey", secKeyObject,
+ "e1", new RawObject(enumType, "A"),
+ "e2", new RawObject(enumType, "B"));
+ }
+ }
+ }
+
+ enum DeleteEnumConstant_Enum {
+ A, C;
+ }
+
+ /**
+ * Don't allow deleting (or renaming, which appears as a deletion) enum
+ * values without mutations.
+ */
+ @Entity
+ static class DeleteEnumConstant_NoMutation
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ DeleteEnumConstant_Enum e1;
+ DeleteEnumConstant_Enum e2;
+ DeleteEnumConstant_Enum e3;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Incompatible enum type changed detected when evolving class: com.sleepycat.persist.test.EvolveClasses$DeleteEnumConstant_Enum version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DeleteEnumConstant_Enum version: 0 Error: Enum values may not be removed: [B]";
+ }
+ }
+
+ /**
+ * With a Deleter, deleted enum values are null. Note that version is not
+ * bumped.
+ */
+ /* Disabled until support for enum deletion is added.
+ @Entity
+ static class DeleteEnumConstant_WithDeleter
+ extends EvolveCase {
+
+ private static final String NAME =
+ DeleteEnumConstant_WithDeleter.class.getName();
+ private static final String NAME2 =
+ DeleteEnumConstant_Enum.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ DeleteEnumConstant_Enum e1;
+ DeleteEnumConstant_Enum e2;
+ DeleteEnumConstant_Enum e3;
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 0, null);
+ checkVersions(model, NAME, 0);
+ if (oldTypesExist) {
+ checkVersions(model, NAME2, 0, NAME2, 0);
+ } else {
+ checkVersions(model, NAME2, 0);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteEnumConstant_WithDeleter>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeleteEnumConstant_WithDeleter.class);
+ DeleteEnumConstant_WithDeleter obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertSame(DeleteEnumConstant_Enum.A, obj.e1);
+ TestCase.assertSame(null, obj.e2);
+ TestCase.assertSame(DeleteEnumConstant_Enum.C, obj.e3);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteEnumConstant_WithDeleter>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ DeleteEnumConstant_WithDeleter.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((DeleteEnumConstant_WithDeleter)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+ RawType enumType = store.getModel().getRawType(NAME2);
+ if (expectUpdated) {
+ checkRawFields(obj, "key", 99,
+ "e1", new RawObject(enumType, "A"),
+ "e2", null,
+ "e3", new RawObject(enumType, "C"));
+ } else {
+ checkRawFields(obj, "key", 99,
+ "e1", new RawObject(enumType, "A"),
+ "e2", new RawObject(enumType, "B"),
+ "e3", new RawObject(enumType, "C"));
+ }
+ }
+ }
+ */
+
+ /**
+ * A field converter can assign deleted enum values. Version must be
+ * bumped when a converter is added.
+ */
+ /* Disabled until support for enum deletion is added.
+ @Entity(version=1)
+ static class DeleteEnumConstant_WithConverter
+ extends EvolveCase {
+
+ private static final String NAME =
+ DeleteEnumConstant_WithConverter.class.getName();
+ private static final String NAME2 =
+ DeleteEnumConstant_Enum.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ DeleteEnumConstant_Enum e1;
+ DeleteEnumConstant_Enum e2;
+ DeleteEnumConstant_Enum e3;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ Conversion c = new MyConversion();
+ m.addConverter(new Converter(NAME, 0, "e1", c));
+ m.addConverter(new Converter(NAME, 0, "e2", c));
+ m.addConverter(new Converter(NAME, 0, "e3", c));
+ return m;
+ }
+
+ @SuppressWarnings("serial")
+ static class MyConversion implements Conversion {
+
+ transient RawType newType;
+
+ public void initialize(EntityModel model) {
+ newType = model.getRawType(NAME2);
+ TestCase.assertNotNull(newType);
+ }
+
+ public Object convert(Object fromValue) {
+ TestCase.assertNotNull(newType);
+ RawObject obj = (RawObject) fromValue;
+ String val = obj.getEnum();
+ TestCase.assertNotNull(val);
+ if ("B".equals(val)) {
+ val = "C";
+ }
+ return new RawObject(newType, val);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof MyConversion;
+ }
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ checkVersions(model, NAME2, 0, NAME2, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ checkVersions(model, NAME2, 0);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteEnumConstant_WithConverter>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeleteEnumConstant_WithConverter.class);
+ DeleteEnumConstant_WithConverter obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertSame(DeleteEnumConstant_Enum.A, obj.e1);
+ TestCase.assertSame(DeleteEnumConstant_Enum.C, obj.e2);
+ TestCase.assertSame(DeleteEnumConstant_Enum.C, obj.e3);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteEnumConstant_WithConverter>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ DeleteEnumConstant_WithConverter.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((DeleteEnumConstant_WithConverter)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj = readRaw(store, 99, NAME, expectEvolved ? 1 : 0,
+ CASECLS, 0);
+ RawType enumType = store.getModel().getRawType(NAME2);
+ if (expectEvolved) {
+ checkRawFields(obj, "key", 99,
+ "e1", new RawObject(enumType, "A"),
+ "e2", new RawObject(enumType, "C"),
+ "e3", new RawObject(enumType, "C"));
+ } else {
+ checkRawFields(obj, "key", 99,
+ "e1", new RawObject(enumType, "A"),
+ "e2", new RawObject(enumType, "B"),
+ "e3", new RawObject(enumType, "C"));
+ }
+ }
+ }
+ */
+
+ @Entity
+ static class DisallowChangeKeyRelate
+ extends EvolveCase {
+
+ private static final String NAME =
+ DisallowChangeKeyRelate.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ int skey;
+
+ @Override
+ public String getStoreOpenException() {
+ return "com.sleepycat.persist.evolve.IncompatibleClassException: Change detected in the relate attribute (Relationship) of a secondary key when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowChangeKeyRelate version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowChangeKeyRelate version: 0 Error: Old key: skey relate: ONE_TO_ONE new key: skey relate: MANY_TO_ONE";
+ }
+ }
+
+ @Entity(version=1)
+ static class AllowChangeKeyMetadata
+ extends EvolveCase {
+
+ private static final String NAME =
+ AllowChangeKeyMetadata.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ /*
+ * Combined fields from version 0 and 1:
+ * addAnnotation = 88;
+ * dropField = 77;
+ * dropAnnotation = 66;
+ * addField = 55;
+ * renamedField = 44; // was toBeRenamedField
+ * aa = 33;
+ * ff = 22;
+ */
+
+ int aa;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int addAnnotation;
+
+ int dropAnnotation;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Integer addField;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int renamedField;
+
+ int ff;
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addDeleter(new Deleter(NAME, 0, "dropField"));
+ m.addRenamer(new Renamer(NAME, 0, "toBeRenamedField",
+ "renamedField"));
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AllowChangeKeyMetadata>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ AllowChangeKeyMetadata.class);
+ AllowChangeKeyMetadata obj = index.get(99);
+ checkValues(obj);
+
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "addAnnotation").get(88));
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "renamedField").get(44));
+ if (updated) {
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "addField").get(55));
+ } else {
+ TestCase.assertNull(store.getSecondaryIndex
+ (index, Integer.class, "addField").get(55));
+ }
+
+ if (doUpdate) {
+ obj.addField = 55;
+ index.put(obj);
+ updated = true;
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "addAnnotation").get(88));
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "addField").get(55));
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AllowChangeKeyMetadata>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ AllowChangeKeyMetadata.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((AllowChangeKeyMetadata)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ private void checkValues(AllowChangeKeyMetadata obj) {
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertEquals(88, obj.addAnnotation);
+ TestCase.assertEquals(66, obj.dropAnnotation);
+ TestCase.assertEquals(44, obj.renamedField);
+ TestCase.assertEquals(33, obj.aa);
+ TestCase.assertEquals(22, obj.ff);
+ if (updated) {
+ TestCase.assertEquals(Integer.valueOf(55), obj.addField);
+ } else {
+ TestCase.assertNull(obj.addField);
+ }
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj = readRaw
+ (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+ if (expectUpdated) {
+ checkRawFields(obj, "key", 99,
+ "addAnnotation", 88,
+ "dropAnnotation", 66,
+ "addField", 55,
+ "renamedField", 44,
+ "aa", 33,
+ "ff", 22);
+ } else if (expectEvolved) {
+ checkRawFields(obj, "key", 99,
+ "addAnnotation", 88,
+ "dropAnnotation", 66,
+ "renamedField", 44,
+ "aa", 33,
+ "ff", 22);
+ } else {
+ checkRawFields(obj, "key", 99,
+ "addAnnotation", 88,
+ "dropField", 77,
+ "dropAnnotation", 66,
+ "toBeRenamedField", 44,
+ "aa", 33,
+ "ff", 22);
+ }
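+ /*
+ * Secondary databases for addAnnotation, addField and renamedField
+ * exist only after evolution; the version 0 databases for
+ * toBeRenamedField, dropField and dropAnnotation exist only before.
+ */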
+ Environment env = store.getEnvironment();
+ assertDbExists(expectEvolved, env, NAME, "addAnnotation");
+ assertDbExists(expectEvolved, env, NAME, "addField");
+ assertDbExists(expectEvolved, env, NAME, "renamedField");
+ assertDbExists(!expectEvolved, env, NAME, "toBeRenamedField");
+ assertDbExists(!expectEvolved, env, NAME, "dropField");
+ assertDbExists(!expectEvolved, env, NAME, "dropAnnotation");
+ }
+ }
+
+ /**
+ * Same test as AllowChangeKeyMetadata but with the secondary keys in an
+ * entity subclass. [#16253]
+ */
+ @Persistent(version=1)
+ static class AllowChangeKeyMetadataInSubclass
+ extends AllowChangeKeyMetadataEntity {
+
+ private static final String NAME =
+ AllowChangeKeyMetadataInSubclass.class.getName();
+ private static final String NAME2 =
+ AllowChangeKeyMetadataEntity.class.getName();
+
+ /*
+ * Combined fields from version 0 and 1:
+ * addAnnotation = 88;
+ * dropField = 77;
+ * dropAnnotation = 66;
+ * addField = 55;
+ * renamedField = 44; // was toBeRenamedField
+ * aa = 33;
+ * ff = 22;
+ */
+
+ int aa;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int addAnnotation;
+
+ int dropAnnotation;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Integer addField;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int renamedField;
+
+ int ff;
+
+ @Override
+ void configure(EntityModel model, StoreConfig config) {
+ model.registerClass(AllowChangeKeyMetadataInSubclass.class);
+ }
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addDeleter(new Deleter(NAME, 0, "dropField"));
+ m.addRenamer(new Renamer(NAME, 0, "toBeRenamedField",
+ "renamedField"));
+ return m;
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkNonEntity(true, model, env, NAME, 1);
+ checkEntity(true, model, env, NAME2, 0, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ checkVersions(model, NAME2, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ checkVersions(model, NAME2, 0);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AllowChangeKeyMetadataEntity>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ AllowChangeKeyMetadataEntity.class);
+ AllowChangeKeyMetadataEntity obj = index.get(99);
+ checkValues(obj);
+
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "addAnnotation").get(88));
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "renamedField").get(44));
+ if (updated) {
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "addField").get(55));
+ } else {
+ TestCase.assertNull(store.getSecondaryIndex
+ (index, Integer.class, "addField").get(55));
+ }
+
+ if (doUpdate) {
+ ((AllowChangeKeyMetadataInSubclass) obj).addField = 55;
+ index.put(obj);
+ updated = true;
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "addAnnotation").get(88));
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "addField").get(55));
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AllowChangeKeyMetadataEntity>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ AllowChangeKeyMetadataEntity.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99);
+ index.put((AllowChangeKeyMetadataInSubclass)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ private void checkValues(AllowChangeKeyMetadataEntity objParam) {
+ AllowChangeKeyMetadataInSubclass obj =
+ (AllowChangeKeyMetadataInSubclass) objParam;
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertEquals(88, obj.addAnnotation);
+ TestCase.assertEquals(66, obj.dropAnnotation);
+ TestCase.assertEquals(44, obj.renamedField);
+ TestCase.assertEquals(33, obj.aa);
+ TestCase.assertEquals(22, obj.ff);
+ if (updated) {
+ TestCase.assertEquals(Integer.valueOf(55), obj.addField);
+ } else {
+ TestCase.assertNull(obj.addField);
+ }
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
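+ /*
+ * Unlike the other cases, readRaw is also passed NAME2 before the key,
+ * since the record is stored under the AllowChangeKeyMetadataEntity
+ * primary index; the key field is checked on the superclass portion of
+ * the raw object.
+ */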
+ RawObject obj = readRaw
+ (store, NAME2, 99, NAME, expectEvolved ? 1 : 0,
+ NAME2, 0, CASECLS, 0);
+ checkRawFields(obj.getSuper(), "key", 99);
+ if (expectUpdated) {
+ checkRawFields(obj,
+ "addAnnotation", 88,
+ "dropAnnotation", 66,
+ "addField", 55,
+ "renamedField", 44,
+ "aa", 33,
+ "ff", 22);
+ } else if (expectEvolved) {
+ checkRawFields(obj,
+ "addAnnotation", 88,
+ "dropAnnotation", 66,
+ "renamedField", 44,
+ "aa", 33,
+ "ff", 22);
+ } else {
+ checkRawFields(obj,
+ "addAnnotation", 88,
+ "dropField", 77,
+ "dropAnnotation", 66,
+ "toBeRenamedField", 44,
+ "aa", 33,
+ "ff", 22);
+ }
+ Environment env = store.getEnvironment();
+ assertDbExists(expectEvolved, env, NAME2, "addAnnotation");
+ assertDbExists(expectEvolved, env, NAME2, "addField");
+ assertDbExists(expectEvolved, env, NAME2, "renamedField");
+ assertDbExists(!expectEvolved, env, NAME2, "toBeRenamedField");
+ assertDbExists(!expectEvolved, env, NAME2, "dropField");
+ assertDbExists(!expectEvolved, env, NAME2, "dropAnnotation");
+ }
+ }
+
+ @Entity
+ static class AllowChangeKeyMetadataEntity
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+ }
+
+ /**
+ * Special case of adding secondaries that caused
+ * IndexOutOfBoundsException. [#15524]
+ */
+ @Entity(version=1)
+ static class AllowAddSecondary
+ extends EvolveCase {
+
+ private static final String NAME =
+ AllowAddSecondary.class.getName();
+
+ @PrimaryKey
+ long key;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int a;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int b;
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Long,AllowAddSecondary>
+ index = store.getPrimaryIndex
+ (Long.class,
+ AllowAddSecondary.class);
+ AllowAddSecondary obj = index.get(99L);
+ checkValues(obj);
+
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "a").get(1));
+ if (updated) {
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "b").get(3));
+ TestCase.assertNull(store.getSecondaryIndex
+ (index, Integer.class, "b").get(2));
+ } else {
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "b").get(2));
+ TestCase.assertNull(store.getSecondaryIndex
+ (index, Integer.class, "b").get(3));
+ }
+
+ if (doUpdate) {
+ obj.b = 3;
+ index.put(obj);
+ updated = true;
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "a").get(1));
+ checkValues(store.getSecondaryIndex
+ (index, Integer.class, "b").get(3));
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Long,AllowAddSecondary>
+ index = newStore.getPrimaryIndex
+ (Long.class,
+ AllowAddSecondary.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99L);
+ index.put((AllowAddSecondary)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ private void checkValues(AllowAddSecondary obj) {
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99L, obj.key);
+ TestCase.assertEquals(1, obj.a);
+ if (updated) {
+ TestCase.assertEquals(3, obj.b);
+ } else {
+ TestCase.assertEquals(2, obj.b);
+ }
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj = readRaw
+ (store, 99L, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+ if (expectUpdated) {
+ checkRawFields(obj, "key", 99L,
+ "a", 1,
+ "b", 3);
+ } else {
+ checkRawFields(obj, "key", 99L,
+ "a", 1,
+ "b", 2);
+ }
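+ /*
+ * The a and b fields existed in version 0 (values 1 and 2 above), but
+ * their secondary databases exist only after evolution, when the
+ * SecondaryKey annotations are added.
+ */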
+ Environment env = store.getEnvironment();
+ assertDbExists(expectEvolved, env, NAME, "a");
+ assertDbExists(expectEvolved, env, NAME, "b");
+ }
+ }
+
+ @Entity(version=1)
+ static class FieldAddAndConvert
+ extends EvolveCase {
+
+ private static final String NAME =
+ FieldAddAndConvert.class.getName();
+
+ @PrimaryKey
+ int key;
+
+ private final String f0 = "0"; // new field
+ private final String f1 = "1"; // converted field
+ private final String f2 = "2"; // new field
+ private final String f3 = "3"; // converted field
+ private final String f4 = "4"; // new field
+
+ @Override
+ Mutations getMutations() {
+ Mutations m = new Mutations();
+ m.addConverter(new Converter(NAME, 0, "f1", new IntToString()));
+ m.addConverter(new Converter(NAME, 0, "f3", new IntToString()));
+ return m;
+ }
+
+ @SuppressWarnings("serial")
+ private static class IntToString implements Conversion {
+
+ public void initialize(EntityModel model) {
+ }
+
+ public Object convert(Object fromValue) {
+ return fromValue.toString();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return other instanceof IntToString;
+ }
+ }
+
+ @Override
+ void checkEvolvedModel(EntityModel model,
+ Environment env,
+ boolean oldTypesExist) {
+ checkEntity(true, model, env, NAME, 1, null);
+ if (oldTypesExist) {
+ checkVersions(model, NAME, 1, NAME, 0);
+ } else {
+ checkVersions(model, NAME, 1);
+ }
+ }
+
+ @Override
+ void readObjects(EntityStore store, boolean doUpdate)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,FieldAddAndConvert>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ FieldAddAndConvert.class);
+ FieldAddAndConvert obj = index.get(99);
+ TestCase.assertNotNull(obj);
+ TestCase.assertEquals(99, obj.key);
+ TestCase.assertEquals("0", obj.f0);
+ TestCase.assertEquals("1", obj.f1);
+ TestCase.assertEquals("2", obj.f2);
+ TestCase.assertEquals("3", obj.f3);
+ TestCase.assertEquals("4", obj.f4);
+
+ if (doUpdate) {
+ index.put(obj);
+ }
+ }
+
+ @Override
+ void copyRawObjects(RawStore rawStore, EntityStore newStore)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,FieldAddAndConvert>
+ index = newStore.getPrimaryIndex
+ (Integer.class,
+ FieldAddAndConvert.class);
+ RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+ index.put((FieldAddAndConvert)
+ newStore.getModel().convertRawObject(raw));
+ }
+
+ @Override
+ void readRawObjects(RawStore store,
+ boolean expectEvolved,
+ boolean expectUpdated)
+ throws DatabaseException {
+
+ RawObject obj = readRaw
+ (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0);
+ if (expectUpdated) {
+ checkRawFields(obj,
+ "key", 99,
+ "f0", "0",
+ "f1", "1",
+ "f2", "2",
+ "f3", "3",
+ "f4", "4");
+ } else if (expectEvolved) {
+ checkRawFields(obj,
+ "key", 99,
+ "f1", "1",
+ "f3", "3");
+ } else {
+ checkRawFields(obj,
+ "key", 99,
+ "f1", 1,
+ "f3", 3);
+ }
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveClasses.java.original b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveClasses.java.original
new file mode 100644
index 0000000..c077920
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveClasses.java.original
@@ -0,0 +1,2855 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+import java.math.BigInteger;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PersistentProxy;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.persist.raw.RawStore;
+
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+
+/**
+ * Nested classes are original versions of classes of the same name in
+ * EvolveClasses.java. See EvolveTestBase.java for the steps that are taken to
+ * add a new class (test case).
+ *
+ * @author Mark Hayes
+ */
+class EvolveClasses {
+
+ @Entity
+ static class DeletedEntity1_ClassRemoved extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeletedEntity1_ClassRemoved> index =
+ store.getPrimaryIndex
+ (Integer.class, DeletedEntity1_ClassRemoved.class);
+ index.put(this);
+
+ SecondaryIndex<Integer,Integer,DeletedEntity1_ClassRemoved>
+ sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+ TestCase.assertNotNull(sindex.get(88));
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ assertDbExists
+ (true, env,
+ DeletedEntity1_ClassRemoved.class.getName());
+ assertDbExists
+ (true, env,
+ DeletedEntity1_ClassRemoved.class.getName(), "skey");
+ }
+ }
+
+ @Entity
+ static class DeletedEntity2_ClassRemoved extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeletedEntity2_ClassRemoved> index =
+ store.getPrimaryIndex
+ (Integer.class, DeletedEntity2_ClassRemoved.class);
+ index.put(this);
+
+ SecondaryIndex<Integer,Integer,DeletedEntity2_ClassRemoved>
+ sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+ TestCase.assertNotNull(sindex.get(88));
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ assertDbExists
+ (true, env,
+ DeletedEntity2_ClassRemoved.class.getName());
+ assertDbExists
+ (true, env,
+ DeletedEntity2_ClassRemoved.class.getName(), "skey");
+ }
+ }
+
+ @Entity
+ static class DeletedEntity3_AnnotRemoved_NoMutation extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeletedEntity3_AnnotRemoved_NoMutation>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeletedEntity3_AnnotRemoved_NoMutation.class);
+ index.put(this);
+
+ SecondaryIndex<Integer,Integer,
+ DeletedEntity3_AnnotRemoved_NoMutation>
+ sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+ TestCase.assertNotNull(sindex.get(88));
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ assertDbExists
+ (true, env,
+ DeletedEntity3_AnnotRemoved_NoMutation.class.getName());
+ assertDbExists
+ (true, env,
+ DeletedEntity3_AnnotRemoved_NoMutation.class.getName(),
+ "skey");
+ }
+ }
+
+ @Entity
+ static class DeletedEntity4_AnnotRemoved_WithDeleter extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeletedEntity4_AnnotRemoved_WithDeleter>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeletedEntity4_AnnotRemoved_WithDeleter.class);
+ index.put(this);
+
+ SecondaryIndex<Integer,Integer,
+ DeletedEntity4_AnnotRemoved_WithDeleter>
+ sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+ TestCase.assertNotNull(sindex.get(88));
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ assertDbExists
+ (true, env,
+ DeletedEntity4_AnnotRemoved_WithDeleter.class.getName());
+ assertDbExists
+ (true, env,
+ DeletedEntity4_AnnotRemoved_WithDeleter.class.getName(),
+ "skey");
+ }
+ }
+
+ @Entity
+ static class DeletedEntity5_EntityToPersist_NoMutation extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeletedEntity5_EntityToPersist_NoMutation>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeletedEntity5_EntityToPersist_NoMutation.class);
+ index.put(this);
+
+ SecondaryIndex<Integer,Integer,
+ DeletedEntity5_EntityToPersist_NoMutation>
+ sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+ TestCase.assertNotNull(sindex.get(88));
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ assertDbExists
+ (true, env,
+ DeletedEntity5_EntityToPersist_NoMutation.class.getName());
+ assertDbExists
+ (true, env,
+ DeletedEntity5_EntityToPersist_NoMutation.class.getName(),
+ "skey");
+ }
+ }
+
+ @Entity
+ static class DeletedEntity6_EntityToPersist_WithDeleter extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeletedEntity6_EntityToPersist_WithDeleter>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeletedEntity6_EntityToPersist_WithDeleter.class);
+ index.put(this);
+
+ SecondaryIndex<Integer,Integer,
+ DeletedEntity6_EntityToPersist_WithDeleter>
+ sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+ TestCase.assertNotNull(sindex.get(88));
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ assertDbExists
+ (true, env,
+ DeletedEntity6_EntityToPersist_WithDeleter.class.getName());
+ assertDbExists
+ (true, env,
+ DeletedEntity6_EntityToPersist_WithDeleter.class.getName(),
+ "skey");
+ }
+ }
+
+ @Persistent
+ static class DeletedPersist1_ClassRemoved {
+
+ int f = 123;
+ }
+
+ @Entity
+ static class DeletedPersist1_ClassRemoved_NoMutation extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ DeletedPersist1_ClassRemoved embed =
+ new DeletedPersist1_ClassRemoved();
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeletedPersist1_ClassRemoved_NoMutation>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeletedPersist1_ClassRemoved_NoMutation.class);
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class DeletedPersist2_ClassRemoved {
+
+ int f = 123;
+ }
+
+ @Entity
+ static class DeletedPersist2_ClassRemoved_WithDeleter extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ DeletedPersist2_ClassRemoved embed =
+ new DeletedPersist2_ClassRemoved();
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeletedPersist2_ClassRemoved_WithDeleter>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeletedPersist2_ClassRemoved_WithDeleter.class);
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class DeletedPersist3_AnnotRemoved {
+
+ int f = 123;
+ }
+
+ @Entity
+ static class DeletedPersist3_AnnotRemoved_NoMutation extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ DeletedPersist3_AnnotRemoved embed =
+ new DeletedPersist3_AnnotRemoved();
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeletedPersist3_AnnotRemoved_NoMutation>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeletedPersist3_AnnotRemoved_NoMutation.class);
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class DeletedPersist4_AnnotRemoved {
+
+ int f = 123;
+ }
+
+ @Entity
+ static class DeletedPersist4_AnnotRemoved_WithDeleter extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ DeletedPersist4_AnnotRemoved embed =
+ new DeletedPersist4_AnnotRemoved();
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeletedPersist4_AnnotRemoved_WithDeleter>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeletedPersist4_AnnotRemoved_WithDeleter.class);
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class DeletedPersist5_PersistToEntity {
+
+ int f = 123;
+ }
+
+ @Entity
+ static class DeletedPersist5_PersistToEntity_NoMutation
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ DeletedPersist5_PersistToEntity embed =
+ new DeletedPersist5_PersistToEntity();
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeletedPersist5_PersistToEntity_NoMutation>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeletedPersist5_PersistToEntity_NoMutation.class);
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class DeletedPersist6_PersistToEntity {
+
+ int f = 123;
+ }
+
+ @Entity
+ static class DeletedPersist6_PersistToEntity_WithDeleter
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ DeletedPersist6_PersistToEntity embed =
+ new DeletedPersist6_PersistToEntity();
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeletedPersist6_PersistToEntity_WithDeleter>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeletedPersist6_PersistToEntity_WithDeleter.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class RenamedEntity1_NewEntityName
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,RenamedEntity1_NewEntityName>
+ index = store.getPrimaryIndex
+ (Integer.class, RenamedEntity1_NewEntityName.class);
+ index.put(this);
+
+ SecondaryIndex<Integer,Integer,RenamedEntity1_NewEntityName>
+ sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+ TestCase.assertNotNull(sindex.get(88));
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ assertDbExists
+ (true, env,
+ RenamedEntity1_NewEntityName.class.getName());
+ assertDbExists
+ (true, env,
+ RenamedEntity1_NewEntityName.class.getName(), "skey");
+ }
+ }
+
+ @Entity
+ static class RenamedEntity2_NewEntityName
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,RenamedEntity2_NewEntityName>
+ index = store.getPrimaryIndex
+ (Integer.class, RenamedEntity2_NewEntityName.class);
+ index.put(this);
+
+ SecondaryIndex<Integer,Integer,RenamedEntity2_NewEntityName>
+ sindex = store.getSecondaryIndex(index, Integer.class, "skey");
+ TestCase.assertNotNull(sindex.get(88));
+ }
+
+ @Override
+ void checkUnevolvedModel(EntityModel model, Environment env) {
+ assertDbExists
+ (true, env,
+ RenamedEntity2_NewEntityName.class.getName());
+ assertDbExists
+ (true, env,
+ RenamedEntity2_NewEntityName.class.getName(), "skey");
+ }
+ }
+
+ @Persistent
+ static class DeleteSuperclass1_BaseClass
+ extends EvolveCase {
+
+ int f = 123;
+ }
+
+ @Entity
+ static class DeleteSuperclass1_NoMutation
+ extends DeleteSuperclass1_BaseClass {
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteSuperclass1_NoMutation>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeleteSuperclass1_NoMutation.class);
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class DeleteSuperclass2_BaseClass
+ extends EvolveCase {
+
+ int f = 123;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey = 456;
+ }
+
+ @Entity
+ static class DeleteSuperclass2_WithConverter
+ extends DeleteSuperclass2_BaseClass {
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff = 88;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey2 = 77;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Integer skey3 = 66;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteSuperclass2_WithConverter>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeleteSuperclass2_WithConverter.class);
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class DeleteSuperclass3_BaseClass
+ extends EvolveCase {
+
+ int f = 123;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey = 456;
+ }
+
+ @Entity
+ static class DeleteSuperclass3_WithDeleter
+ extends DeleteSuperclass3_BaseClass {
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteSuperclass3_WithDeleter>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeleteSuperclass3_WithDeleter.class);
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class DeleteSuperclass4_BaseClass
+ extends EvolveCase {
+ }
+
+ @Entity
+ static class DeleteSuperclass4_NoFields
+ extends DeleteSuperclass4_BaseClass {
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteSuperclass4_NoFields>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeleteSuperclass4_NoFields.class);
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class DeleteSuperclass5_Embedded_Base {
+
+ int g = 456;
+ }
+
+ @Persistent
+ static class DeleteSuperclass5_Embedded
+ extends DeleteSuperclass5_Embedded_Base {
+
+ int f = 123;
+ }
+
+ @Entity
+ static class DeleteSuperclass5_Top
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff = 88;
+
+ DeleteSuperclass5_Embedded embed =
+ new DeleteSuperclass5_Embedded();
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteSuperclass5_Top>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DeleteSuperclass5_Top.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class InsertSuperclass1_Between
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,InsertSuperclass1_Between>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ InsertSuperclass1_Between.class);
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class InsertSuperclass2_Embedded {
+
+ int f = 123;
+ }
+
+ @Entity
+ static class InsertSuperclass2_Top
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff = 88;
+
+ InsertSuperclass2_Embedded embed =
+ new InsertSuperclass2_Embedded();
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,InsertSuperclass2_Top>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ InsertSuperclass2_Top.class);
+ index.put(this);
+ }
+ }
+
+ /*
+ @Persistent
+ static class RenameFields1_Base
+ extends EvolveCase {
+
+ int f = 123;
+ }
+
+ @Entity
+ static class RenameFields1
+ extends RenameFields1_Base {
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,RenameFields1>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ RenameFields1.class);
+ index.put(this);
+ }
+ }
+ */
+
+ @Entity
+ static class DisallowNonKeyField_PrimitiveToObject
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_PrimitiveToObject>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_PrimitiveToObject.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_ObjectToPrimitive
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ String ff = "88";
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_ObjectToPrimitive>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_ObjectToPrimitive.class);
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class MyType {
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof MyType;
+ }
+ }
+
+ @Persistent
+ static class MySubtype extends MyType {
+
+ @Override
+ public boolean equals(Object o) {
+ return o instanceof MySubtype;
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_ObjectToSubtype
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ MyType ff = new MyType();
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_ObjectToSubtype>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_ObjectToSubtype.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_ObjectToUnrelatedSimple
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ Integer ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_ObjectToUnrelatedSimple>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_ObjectToUnrelatedSimple.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_ObjectToUnrelatedOther
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ Integer ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_ObjectToUnrelatedOther>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_ObjectToUnrelatedOther.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_byte2boolean
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ byte ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_byte2boolean>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_byte2boolean.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_short2byte
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ short ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_short2byte>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_short2byte.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_int2short
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ int ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_int2short>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_int2short.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_long2int
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ long ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_long2int>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_long2int.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_float2long
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ float ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_float2long>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_float2long.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_double2float
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ double ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_double2float>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_double2float.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_Byte2byte
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ Byte ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_Byte2byte>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_Byte2byte.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_Character2char
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ Character ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_Character2char>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_Character2char.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_Short2short
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ Short ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_Short2short>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_Short2short.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_Integer2int
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ Integer ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_Integer2int>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_Integer2int.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_Long2long
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ Long ff = 88L;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_Long2long>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_Long2long.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_Float2float
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ Float ff = 88F;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_Float2float>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_Float2float.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_Double2double
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ Double ff = 88D;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_Double2double>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_Double2double.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_float2BigInt
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ float ff = 88F;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_float2BigInt>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_float2BigInt.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowNonKeyField_BigInt2long
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ BigInteger ff = BigInteger.valueOf(88);
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowNonKeyField_BigInt2long>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowNonKeyField_BigInt2long.class);
+ index.put(this);
+ }
+ }
+
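+    /*
+     * In the classes below the @SecondaryKey field type is changed in the
+     * evolved counterpart of this class file, as each class name indicates
+     * (e.g. byte2short); such key field type changes are expected to be
+     * rejected when the store is evolved.
+     */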
+ @Entity
+ static class DisallowSecKeyField_byte2short
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ byte ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowSecKeyField_byte2short>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowSecKeyField_byte2short.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowSecKeyField_char2int
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ char ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowSecKeyField_char2int>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowSecKeyField_char2int.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowSecKeyField_short2int
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ short ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowSecKeyField_short2int>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowSecKeyField_short2int.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowSecKeyField_int2long
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowSecKeyField_int2long>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowSecKeyField_int2long.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowSecKeyField_long2float
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ long ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowSecKeyField_long2float>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowSecKeyField_long2float.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowSecKeyField_float2double
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ float ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowSecKeyField_float2double>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowSecKeyField_float2double.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowSecKeyField_Byte2short2
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Byte ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowSecKeyField_Byte2short2>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowSecKeyField_Byte2short2.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowSecKeyField_Character2int
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Character ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowSecKeyField_Character2int>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowSecKeyField_Character2int.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowSecKeyField_Short2int2
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Short ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowSecKeyField_Short2int2>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowSecKeyField_Short2int2.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowSecKeyField_Integer2long
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Integer ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowSecKeyField_Integer2long>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowSecKeyField_Integer2long.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowSecKeyField_Long2float2
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Long ff = 88L;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowSecKeyField_Long2float2>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowSecKeyField_Long2float2.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowSecKeyField_Float2double2
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ Float ff = 88F;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowSecKeyField_Float2double2>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowSecKeyField_Float2double2.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowSecKeyField_int2BigInt
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int ff = 88;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowSecKeyField_int2BigInt>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowSecKeyField_int2BigInt.class);
+ index.put(this);
+ }
+ }
+
+ // --
+
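+    /*
+     * Primary key field type changes (see the class names, e.g. byte2short)
+     * are likewise expected to be rejected, except for the primitive/wrapper
+     * swaps covered by the AllowPriKeyField_* cases further below.
+     */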
+ @Entity
+ static class DisallowPriKeyField_byte2short
+ extends EvolveCase {
+
+ @PrimaryKey
+ byte key = 99;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Byte,DisallowPriKeyField_byte2short>
+ index = store.getPrimaryIndex
+ (Byte.class,
+ DisallowPriKeyField_byte2short.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowPriKeyField_char2int
+ extends EvolveCase {
+
+ @PrimaryKey
+ char key = 99;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Character,DisallowPriKeyField_char2int>
+ index = store.getPrimaryIndex
+ (Character.class,
+ DisallowPriKeyField_char2int.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowPriKeyField_short2int
+ extends EvolveCase {
+
+ @PrimaryKey
+ short key = 99;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Short,DisallowPriKeyField_short2int>
+ index = store.getPrimaryIndex
+ (Short.class,
+ DisallowPriKeyField_short2int.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowPriKeyField_int2long
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key = 99;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowPriKeyField_int2long>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowPriKeyField_int2long.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowPriKeyField_long2float
+ extends EvolveCase {
+
+ @PrimaryKey
+ long key = 99;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Long,DisallowPriKeyField_long2float>
+ index = store.getPrimaryIndex
+ (Long.class,
+ DisallowPriKeyField_long2float.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowPriKeyField_float2double
+ extends EvolveCase {
+
+ @PrimaryKey
+ float key = 99;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Float,DisallowPriKeyField_float2double>
+ index = store.getPrimaryIndex
+ (Float.class,
+ DisallowPriKeyField_float2double.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowPriKeyField_Byte2short2
+ extends EvolveCase {
+
+ @PrimaryKey
+ Byte key = 99;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Byte,DisallowPriKeyField_Byte2short2>
+ index = store.getPrimaryIndex
+ (Byte.class,
+ DisallowPriKeyField_Byte2short2.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowPriKeyField_Character2int
+ extends EvolveCase {
+
+ @PrimaryKey
+ Character key = 99;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Character,DisallowPriKeyField_Character2int>
+ index = store.getPrimaryIndex
+ (Character.class,
+ DisallowPriKeyField_Character2int.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowPriKeyField_Short2int2
+ extends EvolveCase {
+
+ @PrimaryKey
+ Short key = 99;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Short,DisallowPriKeyField_Short2int2>
+ index = store.getPrimaryIndex
+ (Short.class,
+ DisallowPriKeyField_Short2int2.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowPriKeyField_Integer2long
+ extends EvolveCase {
+
+ @PrimaryKey
+ Integer key = 99;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowPriKeyField_Integer2long>
+ index = store.getPrimaryIndex
+ (Integer.class,
+ DisallowPriKeyField_Integer2long.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowPriKeyField_Long2float2
+ extends EvolveCase {
+
+ @PrimaryKey
+ Long key = 99L;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Long,DisallowPriKeyField_Long2float2>
+ index = store.getPrimaryIndex
+ (Long.class,
+ DisallowPriKeyField_Long2float2.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowPriKeyField_Float2double2
+ extends EvolveCase {
+
+ @PrimaryKey
+ Float key = 99F;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Float,DisallowPriKeyField_Float2double2>
+ index = store.getPrimaryIndex
+ (Float.class,
+ DisallowPriKeyField_Float2double2.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class DisallowPriKeyField_Long2BigInt
+ extends EvolveCase {
+
+ @PrimaryKey
+ Long key = 99L;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Long,DisallowPriKeyField_Long2BigInt>
+ index = store.getPrimaryIndex
+ (Long.class,
+ DisallowPriKeyField_Long2BigInt.class);
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class DisallowCompositeKeyField_byte2short_Key {
+
+ @KeyField(1)
+ int f1 = 1;
+
+ @KeyField(2)
+ byte f2 = 2;
+
+ @KeyField(3)
+ String f3 = "3";
+ }
+
+ @Entity
+ static class DisallowCompositeKeyField_byte2short
+ extends EvolveCase {
+
+ @PrimaryKey
+ DisallowCompositeKeyField_byte2short_Key key =
+ new DisallowCompositeKeyField_byte2short_Key();
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<DisallowCompositeKeyField_byte2short_Key,
+ DisallowCompositeKeyField_byte2short>
+ index = store.getPrimaryIndex
+ (DisallowCompositeKeyField_byte2short_Key.class,
+ DisallowCompositeKeyField_byte2short.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class AllowPriKeyField_byte2Byte
+ extends EvolveCase {
+
+ @PrimaryKey
+ byte key = 99;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Byte,AllowPriKeyField_byte2Byte>
+ index = store.getPrimaryIndex
+ (Byte.class, AllowPriKeyField_byte2Byte.class);
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class AllowPriKeyField_Byte2byte2
+ extends EvolveCase {
+
+ @PrimaryKey
+ Byte key = 99;
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Byte,AllowPriKeyField_Byte2byte2>
+ index = store.getPrimaryIndex
+ (Byte.class, AllowPriKeyField_Byte2byte2.class);
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class AllowFieldTypeChanges_Key {
+
+ AllowFieldTypeChanges_Key() {
+ this(false);
+ }
+
+ AllowFieldTypeChanges_Key(boolean init) {
+ if (init) {
+ f1 = true;
+ f2 = (byte) 2;
+ f3 = (short) 3;
+ f4 = 4;
+ f5 = 5L;
+ f6 = 6F;
+ f7 = 7D;
+ f8 = (char) 8;
+ f9 = true;
+ f10 = (byte) 10;
+ f11 = (short) 11;
+ f12 = 12;
+ f13 = 13L;
+ f14 = 14F;
+ f15 = 15D;
+ f16 = (char) 16;
+ }
+ }
+
+ @KeyField(1)
+ boolean f1;
+
+ @KeyField(2)
+ byte f2;
+
+ @KeyField(3)
+ short f3;
+
+ @KeyField(4)
+ int f4;
+
+ @KeyField(5)
+ long f5;
+
+ @KeyField(6)
+ float f6;
+
+ @KeyField(7)
+ double f7;
+
+ @KeyField(8)
+ char f8;
+
+ @KeyField(9)
+ Boolean f9;
+
+ @KeyField(10)
+ Byte f10;
+
+ @KeyField(11)
+ Short f11;
+
+ @KeyField(12)
+ Integer f12;
+
+ @KeyField(13)
+ Long f13;
+
+ @KeyField(14)
+ Float f14;
+
+ @KeyField(15)
+ Double f15;
+
+ @KeyField(16)
+ Character f16;
+ }
+
+ @Persistent
+ static class AllowFieldTypeChanges_Base
+ extends EvolveCase {
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ AllowFieldTypeChanges_Key kcomposite =
+ new AllowFieldTypeChanges_Key(true);
+
+ long f_long2Integer = 111;
+ String f_String2Long = "222";
+ }
+
+ @Entity
+ static class AllowFieldTypeChanges
+ extends AllowFieldTypeChanges_Base {
+
+ @PrimaryKey
+ int pkeyint = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ boolean kboolean = true;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ byte kbyte = 77;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ short kshort = 66;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int kint = 55;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ long klong = 44;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ float kfloat = 33;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ double kdouble = 22;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ char kchar = 11;
+
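+        /*
+         * The non-key fields below have their types changed in the evolved
+         * counterpart of this class; unlike the Disallow* cases above, these
+         * changes are expected to be accepted.
+         */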
+ byte f01;
+ byte f02;
+ byte f03;
+ byte f04;
+ byte f06;
+ short f07;
+ short f08;
+ short f09;
+ short f10;
+ char f11;
+ char f12;
+ char f13;
+ char f14;
+ int f15;
+ int f16;
+ int f17;
+ long f18;
+ long f19;
+ float f20;
+
+ byte f21;
+ byte f22;
+ byte f23;
+ byte f24;
+ byte f26;
+ short f27;
+ short f28;
+ short f29;
+ short f30;
+ char f31;
+ char f32;
+ char f33;
+ char f34;
+ int f35;
+ int f36;
+ int f37;
+ long f38;
+ long f39;
+ float f40;
+
+ Byte f41;
+ Byte f42;
+ Byte f43;
+ Byte f44;
+ Byte f46;
+ Short f47;
+ Short f48;
+ Short f49;
+ Short f50;
+ Character f51;
+ Character f52;
+ Character f53;
+ Character f54;
+ Integer f55;
+ Integer f56;
+ Integer f57;
+ Long f58;
+ Long f59;
+ Float f60;
+
+ byte f70;
+ short f71;
+ char f72;
+ int f73;
+ long f74;
+ Byte f75;
+ Short f76;
+ Character f77;
+ Integer f78;
+ Long f79;
+
+ long f_long2int = 333;
+ String f_String2long = "444";
+
+ private void init() {
+ f01 = (byte) 1;
+ f02 = (byte) 2;
+ f03 = (byte) 3;
+ f04 = (byte) 4;
+ f06 = (byte) 6;
+ f07 = (short) 7;
+ f08 = (short) 8;
+ f09 = (short) 9;
+ f10 = (short) 10;
+ f11 = (char) 11;
+ f12 = (char) 12;
+ f13 = (char) 13;
+ f14 = (char) 14;
+ f15 = 15;
+ f16 = 16;
+ f17 = 17;
+ f18 = (long) 18;
+ f19 = (long) 19;
+ f20 = (float) 20;
+
+ f21 = (byte) 21;
+ f22 = (byte) 22;
+ f23 = (byte) 23;
+ f24 = (byte) 24;
+ f26 = (byte) 26;
+ f27 = (short) 27;
+ f28 = (short) 28;
+ f29 = (short) 29;
+ f30 = (short) 30;
+ f31 = (char) 31;
+ f32 = (char) 32;
+ f33 = (char) 33;
+ f34 = (char) 34;
+ f35 = 35;
+ f36 = 36;
+ f37 = 37;
+ f38 = (long) 38;
+ f39 = (long) 39;
+ f40 = (float) 40;
+
+ f41 = (byte) 41;
+ f42 = (byte) 42;
+ f43 = (byte) 43;
+ f44 = (byte) 44;
+ f46 = (byte) 46;
+ f47 = (short) 47;
+ f48 = (short) 48;
+ f49 = (short) 49;
+ f50 = (short) 50;
+ f51 = (char) 51;
+ f52 = (char) 52;
+ f53 = (char) 53;
+ f54 = (char) 54;
+ f55 = 55;
+ f56 = 56;
+ f57 = 57;
+ f58 = (long) 58;
+ f59 = (long) 59;
+ f60 = (float) 60;
+
+ f70 = (byte) 70;
+ f71 = (short) 71;
+ f72 = (char) 72;
+ f73 = 73;
+ f74 = (long) 74;
+ f75 = (byte) 75;
+ f76 = (short) 76;
+ f77 = (char) 77;
+ f78 = 78;
+ f79 = (long) 79;
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AllowFieldTypeChanges>
+ index = store.getPrimaryIndex
+ (Integer.class, AllowFieldTypeChanges.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class ConvertFieldContent_Entity
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ String f1;
+ String f2;
+
+ private void init() {
+ key = 99;
+ f1 = "01234";
+ f2 = "56789";
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertFieldContent_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class, ConvertFieldContent_Entity.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class ConvertExample1_Address {
+ String street;
+ String city;
+ String state;
+ String zipCode;
+ }
+
+ @Entity
+ static class ConvertExample1_Entity
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ ConvertExample1_Address embed;
+
+ private void init() {
+ key = 99;
+ embed = new ConvertExample1_Address();
+ embed.street = "street";
+ embed.city = "city";
+ embed.state = "state";
+ embed.zipCode = "12345";
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample1_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class, ConvertExample1_Entity.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class ConvertExample2_Person
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ String address;
+
+ private void init() {
+ key = 99;
+ address = "street#city#state#12345";
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample2_Person>
+ index = store.getPrimaryIndex
+ (Integer.class, ConvertExample2_Person.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class ConvertExample3_Person
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ String street;
+ String city;
+ String state;
+ int zipCode;
+
+ private void init() {
+ key = 99;
+ street = "street";
+ city = "city";
+ state = "state";
+ zipCode = 12345;
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample3_Person>
+ index = store.getPrimaryIndex
+ (Integer.class, ConvertExample3_Person.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class ConvertExample3Reverse_Address {
+ String street;
+ String city;
+ String state;
+ int zipCode;
+ }
+
+ @Entity
+ static class ConvertExample3Reverse_Person
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ ConvertExample3Reverse_Address address;
+
+ private void init() {
+ key = 99;
+ address = new ConvertExample3Reverse_Address();
+ address.street = "street";
+ address.city = "city";
+ address.state = "state";
+ address.zipCode = 12345;
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample3Reverse_Person>
+ index = store.getPrimaryIndex
+ (Integer.class, ConvertExample3Reverse_Person.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class ConvertExample4_A extends ConvertExample4_B {
+ String name;
+ }
+
+ @Persistent
+ static class ConvertExample4_B {
+ }
+
+ @Entity
+ static class ConvertExample4_Entity
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ ConvertExample4_A embed;
+
+ private void init() {
+ key = 99;
+ embed = new ConvertExample4_A();
+ embed.name = "name";
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample4_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class, ConvertExample4_Entity.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class ConvertExample5_Pet {
+ String name;
+ boolean isCatNotDog;
+ int finickyLevel;
+ double barkVolume;
+ }
+
+ @Entity
+ static class ConvertExample5_Entity
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ ConvertExample5_Pet cat;
+ ConvertExample5_Pet dog;
+
+ private void init() {
+ key = 99;
+ cat = new ConvertExample5_Pet();
+ cat.name = "Jeffry";
+ cat.isCatNotDog = true;
+ cat.finickyLevel = 999;
+ dog = new ConvertExample5_Pet();
+ dog.name = "Nelson";
+ dog.isCatNotDog = false;
+ dog.barkVolume = 0.01;
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ConvertExample5_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class, ConvertExample5_Entity.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class AllowFieldAddDelete_Embed {
+ private int f1 = 1;
+ private String f2 = "2";
+ private String f4 = "4";
+ private int f6 = 6;
+ private String f7 = "7";
+ }
+
+ @Persistent
+ static class AllowFieldAddDelete_Base
+ extends EvolveCase {
+
+ private int f1 = 1;
+ private String f2 = "2";
+ private String f4 = "4";
+ private int f6 = 6;
+ private String f7 = "7";
+ }
+
+ @Entity
+ static class AllowFieldAddDelete
+ extends AllowFieldAddDelete_Base {
+
+ @PrimaryKey
+ int key;
+
+ AllowFieldAddDelete_Embed embed;
+
+ private int f1 = 1;
+ private String f2 = "2";
+ private String f4 = "4";
+ private int f6 = 6;
+ private String f7 = "7";
+
+ private void init() {
+ key = 99;
+ embed = new AllowFieldAddDelete_Embed();
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AllowFieldAddDelete>
+ index = store.getPrimaryIndex
+ (Integer.class, AllowFieldAddDelete.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ static class ProxiedClass {
+ int data;
+
+ ProxiedClass(int data) {
+ this.data = data;
+ }
+ }
+
+ @Persistent(proxyFor=ProxiedClass.class)
+ static class ProxiedClass_Proxy implements PersistentProxy<ProxiedClass> {
+ int data;
+
+ public void initializeProxy(ProxiedClass o) {
+ data = o.data;
+ }
+
+ public ProxiedClass convertProxy() {
+ return new ProxiedClass(data);
+ }
+ }
+
+ @Entity
+ static class ProxiedClass_Entity
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ ProxiedClass embed;
+
+ private void init() {
+ key = 99;
+ embed = new ProxiedClass(88);
+ }
+
+ @Override
+ void configure(EntityModel model, StoreConfig config) {
+ model.registerClass(ProxiedClass_Proxy.class);
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ProxiedClass_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class, ProxiedClass_Entity.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ @Persistent(proxyFor=StringBuffer.class)
+ static class DisallowChangeProxyFor_Proxy
+ implements PersistentProxy<StringBuffer> {
+
+ String data;
+
+ public void initializeProxy(StringBuffer o) {
+ data = o.toString();
+ }
+
+ public StringBuffer convertProxy() {
+ return new StringBuffer(data);
+ }
+ }
+
+ @Entity
+ static class DisallowChangeProxyFor
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ private void init() {
+ key = 99;
+ }
+
+ @Override
+ void configure(EntityModel model, StoreConfig config) {
+ model.registerClass(DisallowChangeProxyFor_Proxy.class);
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowChangeProxyFor>
+ index = store.getPrimaryIndex
+ (Integer.class, DisallowChangeProxyFor.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ @Persistent(proxyFor=StringBuffer.class)
+ static class DisallowDeleteProxyFor_Proxy
+ implements PersistentProxy<StringBuffer> {
+
+ String data;
+
+ public void initializeProxy(StringBuffer o) {
+ data = o.toString();
+ }
+
+ public StringBuffer convertProxy() {
+ return new StringBuffer(data);
+ }
+ }
+
+ @Entity
+ static class DisallowDeleteProxyFor
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ private void init() {
+ key = 99;
+ }
+
+ @Override
+ void configure(EntityModel model, StoreConfig config) {
+ model.registerClass(DisallowDeleteProxyFor_Proxy.class);
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowDeleteProxyFor>
+ index = store.getPrimaryIndex
+ (Integer.class, DisallowDeleteProxyFor.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ @Persistent
+ static class ArrayNameChange_Component {
+
+ int data;
+ }
+
+ @Entity
+ static class ArrayNameChange_Entity
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ ArrayNameChange_Component[] embed;
+ ArrayNameChange_Component embed2;
+
+ private void init() {
+ key = 99;
+ embed2 = new ArrayNameChange_Component();
+ embed2.data = 88;
+ embed = new ArrayNameChange_Component[] { embed2 };
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,ArrayNameChange_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class, ArrayNameChange_Entity.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ enum AddEnumConstant_Enum {
+ A, B;
+ }
+
+ @Entity
+ static class AddEnumConstant_Entity
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ AddEnumConstant_Enum e1;
+ AddEnumConstant_Enum e2;
+
+ private void init() {
+ key = 99;
+ e1 = AddEnumConstant_Enum.A;
+ e2 = AddEnumConstant_Enum.B;
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AddEnumConstant_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class, AddEnumConstant_Entity.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ enum InsertEnumConstant_Enum {
+ A, B;
+ }
+
+ @Persistent
+ static class InsertEnumConstant_KeyClass
+        implements Comparable<InsertEnumConstant_KeyClass> {
+
+ @KeyField(1)
+ InsertEnumConstant_Enum key;
+
+ private InsertEnumConstant_KeyClass() {}
+
+ InsertEnumConstant_KeyClass(InsertEnumConstant_Enum key) {
+ this.key = key;
+ }
+
+ public int compareTo(InsertEnumConstant_KeyClass o) {
+ /* Use the natural order, in spite of insertions. */
+ return key.compareTo(o.key);
+ }
+ }
+
+ @Entity
+ static class InsertEnumConstant_Entity
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ InsertEnumConstant_KeyClass secKey;
+
+ InsertEnumConstant_Enum e1;
+ InsertEnumConstant_Enum e2;
+
+ private void init() {
+ key = 99;
+ secKey =
+ new InsertEnumConstant_KeyClass(InsertEnumConstant_Enum.A);
+ e1 = InsertEnumConstant_Enum.A;
+ e2 = InsertEnumConstant_Enum.B;
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,InsertEnumConstant_Entity>
+ index = store.getPrimaryIndex
+ (Integer.class, InsertEnumConstant_Entity.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ enum DeleteEnumConstant_Enum {
+ A, B, C;
+ }
+
+ @Entity
+ static class DeleteEnumConstant_NoMutation
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ DeleteEnumConstant_Enum e1;
+ DeleteEnumConstant_Enum e2;
+ DeleteEnumConstant_Enum e3;
+
+ private void init() {
+ key = 99;
+ e1 = DeleteEnumConstant_Enum.A;
+ e2 = DeleteEnumConstant_Enum.B;
+ e3 = DeleteEnumConstant_Enum.C;
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteEnumConstant_NoMutation>
+ index = store.getPrimaryIndex
+ (Integer.class, DeleteEnumConstant_NoMutation.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ /* Disabled until support for enum deletion is added.
+ @Entity
+ static class DeleteEnumConstant_WithConverter
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ DeleteEnumConstant_Enum e1;
+ DeleteEnumConstant_Enum e2;
+ DeleteEnumConstant_Enum e3;
+
+ private void init() {
+ key = 99;
+ e1 = DeleteEnumConstant_Enum.A;
+ e2 = DeleteEnumConstant_Enum.B;
+ e3 = DeleteEnumConstant_Enum.C;
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DeleteEnumConstant_WithConverter>
+ index = store.getPrimaryIndex
+ (Integer.class, DeleteEnumConstant_WithConverter.class);
+ init();
+ index.put(this);
+ }
+ }
+ */
+
+ @Entity
+ static class DisallowChangeKeyRelate
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int skey;
+
+ private void init() {
+ key = 99;
+ skey = 88;
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,DisallowChangeKeyRelate>
+ index = store.getPrimaryIndex
+ (Integer.class, DisallowChangeKeyRelate.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class AllowChangeKeyMetadata
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ int aa;
+
+ int addAnnotation;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int dropField;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int dropAnnotation;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int toBeRenamedField;
+
+ int ff;
+
+ private void init() {
+ key = 99;
+ addAnnotation = 88;
+ dropField = 77;
+ dropAnnotation = 66;
+ toBeRenamedField = 44;
+ aa = 33;
+ ff = 22;
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AllowChangeKeyMetadata>
+ index = store.getPrimaryIndex
+ (Integer.class, AllowChangeKeyMetadata.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ /** [#16253] */
+ @Persistent
+ static class AllowChangeKeyMetadataInSubclass
+ extends AllowChangeKeyMetadataEntity {
+
+ int aa;
+
+ int addAnnotation;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int dropField;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int dropAnnotation;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ int toBeRenamedField;
+
+ int ff;
+
+ private void init() {
+ key = 99;
+ addAnnotation = 88;
+ dropField = 77;
+ dropAnnotation = 66;
+ toBeRenamedField = 44;
+ aa = 33;
+ ff = 22;
+ }
+
+ @Override
+ void configure(EntityModel model, StoreConfig config) {
+ model.registerClass(AllowChangeKeyMetadataInSubclass.class);
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,AllowChangeKeyMetadataEntity>
+ index = store.getPrimaryIndex
+ (Integer.class, AllowChangeKeyMetadataEntity.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ @Entity
+ static class AllowChangeKeyMetadataEntity
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+ }
+
+ /** [#15524] */
+ @Entity
+ static class AllowAddSecondary
+ extends EvolveCase {
+
+ @PrimaryKey
+ long key;
+
+ int a;
+ int b;
+
+ private void init() {
+ key = 99;
+ a = 1;
+ b = 2;
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Long,AllowAddSecondary>
+ index = store.getPrimaryIndex
+ (Long.class, AllowAddSecondary.class);
+ init();
+ index.put(this);
+ }
+ }
+
+ /** [#15797] */
+ @Entity
+ static class FieldAddAndConvert
+ extends EvolveCase {
+
+ @PrimaryKey
+ int key;
+
+ private int f1 = 1;
+ private int f3 = 3;
+
+ private void init() {
+ key = 99;
+ }
+
+ @Override
+ void writeObjects(EntityStore store)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,FieldAddAndConvert>
+ index = store.getPrimaryIndex
+ (Integer.class, FieldAddAndConvert.class);
+ init();
+ index.put(this);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveTest.java
new file mode 100644
index 0000000..c76a63a
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveTest.java
@@ -0,0 +1,255 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+package com.sleepycat.persist.test;
+
+import java.io.IOException;
+
+import junit.framework.Test;
+
+import com.sleepycat.persist.evolve.EvolveConfig;
+import com.sleepycat.persist.evolve.EvolveEvent;
+import com.sleepycat.persist.evolve.EvolveListener;
+import com.sleepycat.persist.evolve.EvolveStats;
+import com.sleepycat.persist.impl.PersistCatalog;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * Runs part two of the EvolveTest. This part is run with the new/updated
+ * version of EvolveClasses in the classpath. It uses the environment and
+ * store created by EvolveTestInit. It verifies that it can read/write/evolve
+ * objects serialized using the old class format, and that it can create new
+ * objects with the new class format.
+ *
+ * @author Mark Hayes
+ */
+public class EvolveTest extends EvolveTestBase {
+
+ /* Toggle to use listener every other test case. */
+ private static boolean useEvolveListener;
+
+ public static Test suite()
+ throws Exception {
+
+ return getSuite(EvolveTest.class);
+ }
+
+ private int evolveNRead;
+ private int evolveNConverted;
+
+ boolean useEvolvedClass() {
+ return true;
+ }
+
+ @Override
+ public void tearDown() {
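+        /* Ignore errors from the base teardown so they do not fail the test. */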
+ try { super.tearDown(); } catch (Throwable e) { }
+ }
+
+ @Override
+ public void setUp()
+ throws IOException {
+
+ /* Copy the log files created by EvolveTestInit. */
+ envHome = getTestInitHome(true /*evolved*/);
+ envHome.mkdirs();
+ SharedTestUtils.emptyDir(envHome);
+ SharedTestUtils.copyFiles(getTestInitHome(false /*evolved*/), envHome);
+ }
+
+ public void testLazyEvolve()
+ throws Exception {
+
+ openEnv();
+
+ /*
+ * Open in raw mode to check unevolved raw object and formats. This
+ * is possible whether or not we can open the store further below to
+ * evolve formats without errors.
+ */
+ openRawStore();
+ caseObj.checkUnevolvedModel(rawStore.getModel(), env);
+ caseObj.readRawObjects
+ (rawStore, false /*expectEvolved*/, false /*expectUpdated*/);
+ closeRawStore();
+
+ if (openStoreReadWrite()) {
+
+ /*
+ * When opening read-write, formats are evolved lazily. Check by
+ * reading evolved objects.
+ */
+ caseObj.checkEvolvedModel
+ (store.getModel(), env, true /*oldTypesExist*/);
+ caseObj.readObjects(store, false /*doUpdate*/);
+ closeStore();
+
+ /*
+ * Read raw objects again to check that the evolved objects are
+ * returned even though the stored objects were not evolved.
+ */
+ openRawStore();
+ caseObj.checkEvolvedModel
+ (rawStore.getModel(), env, true /*oldTypesExist*/);
+ caseObj.readRawObjects
+ (rawStore, true /*expectEvolved*/, false /*expectUpdated*/);
+ closeRawStore();
+
+ /*
+ * Open read-only to ensure that the catalog does not need to
+ * change (evolve formats) unnecessarily.
+ */
+ PersistCatalog.expectNoClassChanges = true;
+ try {
+ openStoreReadOnly();
+ } finally {
+ PersistCatalog.expectNoClassChanges = false;
+ }
+ caseObj.checkEvolvedModel
+ (store.getModel(), env, true /*oldTypesExist*/);
+ caseObj.readObjects(store, false /*doUpdate*/);
+ closeStore();
+
+ /*
+ * Open read-write to update objects and store them in evolved
+ * format.
+ */
+ openStoreReadWrite();
+ caseObj.checkEvolvedModel
+ (store.getModel(), env, true /*oldTypesExist*/);
+ caseObj.readObjects(store, true /*doUpdate*/);
+ caseObj.checkEvolvedModel
+ (store.getModel(), env, true /*oldTypesExist*/);
+ closeStore();
+
+ /*
+ * Check raw objects again after the evolved objects were stored.
+ */
+ openRawStore();
+ caseObj.checkEvolvedModel
+ (rawStore.getModel(), env, true /*oldTypesExist*/);
+ caseObj.readRawObjects
+ (rawStore, true /*expectEvolved*/, true /*expectUpdated*/);
+ closeRawStore();
+ }
+
+ closeAll();
+ }
+
+ public void testEagerEvolve()
+ throws Exception {
+
+ /* If the store cannot be opened, this test is not appropriate. */
+ if (caseObj.getStoreOpenException() != null) {
+ return;
+ }
+
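+        /*
+         * Unlike testLazyEvolve above, this test converts all stored records
+         * up front via store.evolve() and then verifies the returned stats.
+         */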
+ EvolveConfig config = new EvolveConfig();
+
+ /*
+ * Use listener every other time to ensure that the stats are returned
+ * correctly when no listener is configured. [#17024]
+ */
+ useEvolveListener = !useEvolveListener;
+ if (useEvolveListener) {
+ config.setEvolveListener(new EvolveListener() {
+ public boolean evolveProgress(EvolveEvent event) {
+ EvolveStats stats = event.getStats();
+ evolveNRead = stats.getNRead();
+ evolveNConverted = stats.getNConverted();
+ return true;
+ }
+ });
+ }
+
+ openEnv();
+
+ openStoreReadWrite();
+
+ /*
+ * Evolve and expect that the expected number of entities are
+ * converted.
+ */
+ int nExpected = caseObj.getNRecordsExpected();
+ evolveNRead = 0;
+ evolveNConverted = 0;
+ PersistCatalog.unevolvedFormatsEncountered = false;
+ EvolveStats stats = store.evolve(config);
+ if (nExpected > 0) {
+ assertTrue(PersistCatalog.unevolvedFormatsEncountered);
+ }
+ assertTrue(stats.getNRead() == nExpected);
+ assertTrue(stats.getNConverted() == nExpected);
+ assertTrue(stats.getNConverted() >= stats.getNRead());
+ if (useEvolveListener) {
+ assertEquals(evolveNRead, stats.getNRead());
+ assertEquals(evolveNConverted, stats.getNConverted());
+ }
+
+ /* Evolve again and expect that no entities are converted. */
+ evolveNRead = 0;
+ evolveNConverted = 0;
+ PersistCatalog.unevolvedFormatsEncountered = false;
+ stats = store.evolve(config);
+ assertTrue(!PersistCatalog.unevolvedFormatsEncountered);
+ assertEquals(0, stats.getNRead());
+ assertEquals(0, stats.getNConverted());
+ if (useEvolveListener) {
+ assertTrue(evolveNRead == 0);
+ assertTrue(evolveNConverted == 0);
+ }
+
+ /* Ensure that we can read all entities without evolution. */
+ PersistCatalog.unevolvedFormatsEncountered = false;
+ caseObj.readObjects(store, false /*doUpdate*/);
+ assertTrue(!PersistCatalog.unevolvedFormatsEncountered);
+
+ /*
+         * When automatic unused type deletion is implemented in the future,
+         * the oldTypesExist parameters below should be changed to false.
+ */
+
+ /* Open again and try an update. */
+ caseObj.checkEvolvedModel
+ (store.getModel(), env, true /*oldTypesExist*/);
+ caseObj.readObjects(store, true /*doUpdate*/);
+ caseObj.checkEvolvedModel
+ (store.getModel(), env, true /*oldTypesExist*/);
+ closeStore();
+
+ /* Open read-only and double check that everything is OK. */
+ openStoreReadOnly();
+ caseObj.checkEvolvedModel
+ (store.getModel(), env, true /*oldTypesExist*/);
+ caseObj.readObjects(store, false /*doUpdate*/);
+ caseObj.checkEvolvedModel
+ (store.getModel(), env, true /*oldTypesExist*/);
+ closeStore();
+
+ /* Check raw objects. */
+ openRawStore();
+ caseObj.checkEvolvedModel
+ (rawStore.getModel(), env, true /*oldTypesExist*/);
+ caseObj.readRawObjects
+ (rawStore, true /*expectEvolved*/, true /*expectUpdated*/);
+
+ /*
+ * Test copy raw object to new store via convertRawObject. In this
+ * test we can pass false for oldTypesExist because newStore starts
+ * with the new/evolved class model.
+ */
+ openNewStore();
+ caseObj.copyRawObjects(rawStore, newStore);
+ caseObj.readObjects(newStore, true /*doUpdate*/);
+ caseObj.checkEvolvedModel
+ (newStore.getModel(), env, false /*oldTypesExist*/);
+ closeNewStore();
+ closeRawStore();
+
+ closeAll();
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveTestBase.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveTestBase.java
new file mode 100644
index 0000000..7b97dcd
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveTestBase.java
@@ -0,0 +1,438 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+package com.sleepycat.persist.test;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.util.Enumeration;
+
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.AnnotationModel;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.raw.RawStore;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * Base class for EvolveTest and EvolveTestInit.
+ *
+ * @author Mark Hayes
+ */
+public abstract class EvolveTestBase extends TestCase {
+
+ /*
+     * When adding an evolve test class, three places need to be changed:
+     * 1) Add the unmodified class to EvolveClasses.java.original.
+     * 2) Add the modified class to EvolveClasses.java.
+ * 3) Add the class name to the ALL list below as a pair of strings. The
+ * first string in each pair is the name of the original class, and the
+ * second string is the name of the evolved class or null if the evolved
+ * name is the same as the original. The index in the list identifies a
+ * test case, and the class at that position identifies the old and new
+ * class to use for the test.
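+     *
+     * For example, the pair ("RenamedEntity2_NewEntityName",
+     * "RenamedEntity2_NewEntityName_WithRenamer") names an original class and
+     * its renamed evolved class, while a null second entry means the evolved
+     * class keeps the original name.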
+ */
+ private static final String[] ALL = {
+//*
+ "DeletedEntity1_ClassRemoved",
+ "DeletedEntity1_ClassRemoved_NoMutation",
+ "DeletedEntity2_ClassRemoved",
+ "DeletedEntity2_ClassRemoved_WithDeleter",
+ "DeletedEntity3_AnnotRemoved_NoMutation",
+ null,
+ "DeletedEntity4_AnnotRemoved_WithDeleter",
+ null,
+ "DeletedEntity5_EntityToPersist_NoMutation",
+ null,
+ "DeletedEntity6_EntityToPersist_WithDeleter",
+ null,
+ "DeletedPersist1_ClassRemoved_NoMutation",
+ null,
+ "DeletedPersist2_ClassRemoved_WithDeleter",
+ null,
+ "DeletedPersist3_AnnotRemoved_NoMutation",
+ null,
+ "DeletedPersist4_AnnotRemoved_WithDeleter",
+ null,
+ "DeletedPersist5_PersistToEntity_NoMutation",
+ null,
+ "DeletedPersist6_PersistToEntity_WithDeleter",
+ null,
+ "RenamedEntity1_NewEntityName",
+ "RenamedEntity1_NewEntityName_NoMutation",
+ "RenamedEntity2_NewEntityName",
+ "RenamedEntity2_NewEntityName_WithRenamer",
+ "DeleteSuperclass1_NoMutation",
+ null,
+ "DeleteSuperclass2_WithConverter",
+ null,
+ "DeleteSuperclass3_WithDeleter",
+ null,
+ "DeleteSuperclass4_NoFields",
+ null,
+ "DeleteSuperclass5_Top",
+ null,
+ "InsertSuperclass1_Between",
+ null,
+ "InsertSuperclass2_Top",
+ null,
+ "DisallowNonKeyField_PrimitiveToObject",
+ null,
+ "DisallowNonKeyField_ObjectToPrimitive",
+ null,
+ "DisallowNonKeyField_ObjectToSubtype",
+ null,
+ "DisallowNonKeyField_ObjectToUnrelatedSimple",
+ null,
+ "DisallowNonKeyField_ObjectToUnrelatedOther",
+ null,
+ "DisallowNonKeyField_byte2boolean",
+ null,
+ "DisallowNonKeyField_short2byte",
+ null,
+ "DisallowNonKeyField_int2short",
+ null,
+ "DisallowNonKeyField_long2int",
+ null,
+ "DisallowNonKeyField_float2long",
+ null,
+ "DisallowNonKeyField_double2float",
+ null,
+ "DisallowNonKeyField_Byte2byte",
+ null,
+ "DisallowNonKeyField_Character2char",
+ null,
+ "DisallowNonKeyField_Short2short",
+ null,
+ "DisallowNonKeyField_Integer2int",
+ null,
+ "DisallowNonKeyField_Long2long",
+ null,
+ "DisallowNonKeyField_Float2float",
+ null,
+ "DisallowNonKeyField_Double2double",
+ null,
+ "DisallowNonKeyField_float2BigInt",
+ null,
+ "DisallowNonKeyField_BigInt2long",
+ null,
+ "DisallowSecKeyField_byte2short",
+ null,
+ "DisallowSecKeyField_char2int",
+ null,
+ "DisallowSecKeyField_short2int",
+ null,
+ "DisallowSecKeyField_int2long",
+ null,
+ "DisallowSecKeyField_long2float",
+ null,
+ "DisallowSecKeyField_float2double",
+ null,
+ "DisallowSecKeyField_Byte2short2",
+ null,
+ "DisallowSecKeyField_Character2int",
+ null,
+ "DisallowSecKeyField_Short2int2",
+ null,
+ "DisallowSecKeyField_Integer2long",
+ null,
+ "DisallowSecKeyField_Long2float2",
+ null,
+ "DisallowSecKeyField_Float2double2",
+ null,
+ "DisallowSecKeyField_int2BigInt",
+ null,
+ "DisallowPriKeyField_byte2short",
+ null,
+ "DisallowPriKeyField_char2int",
+ null,
+ "DisallowPriKeyField_short2int",
+ null,
+ "DisallowPriKeyField_int2long",
+ null,
+ "DisallowPriKeyField_long2float",
+ null,
+ "DisallowPriKeyField_float2double",
+ null,
+ "DisallowPriKeyField_Byte2short2",
+ null,
+ "DisallowPriKeyField_Character2int",
+ null,
+ "DisallowPriKeyField_Short2int2",
+ null,
+ "DisallowPriKeyField_Integer2long",
+ null,
+ "DisallowPriKeyField_Long2float2",
+ null,
+ "DisallowPriKeyField_Float2double2",
+ null,
+ "DisallowPriKeyField_Long2BigInt",
+ null,
+ "DisallowCompositeKeyField_byte2short",
+ null,
+ "AllowPriKeyField_Byte2byte2",
+ null,
+ "AllowPriKeyField_byte2Byte",
+ null,
+ "AllowFieldTypeChanges",
+ null,
+ "ConvertFieldContent_Entity",
+ null,
+ "ConvertExample1_Entity",
+ null,
+ "ConvertExample2_Person",
+ null,
+ "ConvertExample3_Person",
+ null,
+ "ConvertExample3Reverse_Person",
+ null,
+ "ConvertExample4_Entity",
+ null,
+ "ConvertExample5_Entity",
+ null,
+ "AllowFieldAddDelete",
+ null,
+ "ProxiedClass_Entity",
+ null,
+ "DisallowChangeProxyFor",
+ null,
+ "DisallowDeleteProxyFor",
+ null,
+ "ArrayNameChange_Entity",
+ null,
+ "AddEnumConstant_Entity",
+ null,
+ "InsertEnumConstant_Entity",
+ null,
+ "DeleteEnumConstant_NoMutation",
+ null,
+ "DisallowChangeKeyRelate",
+ null,
+ "AllowChangeKeyMetadata",
+ null,
+ "AllowChangeKeyMetadataInSubclass",
+ null,
+ "AllowAddSecondary",
+ null,
+ "FieldAddAndConvert",
+ null,
+//*/
+ };
+
+ File envHome;
+ Environment env;
+ EntityStore store;
+ RawStore rawStore;
+ EntityStore newStore;
+ String caseClsName;
+ Class caseCls;
+ EvolveCase caseObj;
+ String caseLabel;
+
+ static TestSuite getSuite(Class testClass)
+ throws Exception {
+
+ TestSuite suite = new TestSuite();
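+        /* ALL holds {original, evolvedOrNull} pairs, so iterate in steps of two. */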
+ for (int i = 0; i < ALL.length; i += 2) {
+ String originalClsName = ALL[i];
+ String evolvedClsName = ALL[i + 1];
+ if (evolvedClsName == null) {
+ evolvedClsName = originalClsName;
+ }
+ TestSuite baseSuite = new TestSuite(testClass);
+ Enumeration e = baseSuite.tests();
+ while (e.hasMoreElements()) {
+ EvolveTestBase test = (EvolveTestBase) e.nextElement();
+ test.init(originalClsName, evolvedClsName);
+ suite.addTest(test);
+ }
+ }
+ return suite;
+ }
+
+ private void init(String originalClsName,
+ String evolvedClsName)
+ throws Exception {
+
+ String caseClsName = useEvolvedClass() ?
+ evolvedClsName : originalClsName;
+ caseClsName = "com.sleepycat.persist.test.EvolveClasses$" +
+ caseClsName;
+
+ this.caseClsName = caseClsName;
+ this.caseCls = Class.forName(caseClsName);
+ this.caseObj = (EvolveCase) caseCls.newInstance();
+ this.caseLabel = evolvedClsName;
+ }
+
+ abstract boolean useEvolvedClass();
+
+ File getTestInitHome(boolean evolved) {
+ return new File
+ (System.getProperty("testevolvedir"),
+ (evolved ? "evolved" : "original") + '/' + caseLabel);
+ }
+
+ @Override
+ public void tearDown() {
+
+ /* Set test name for reporting; cannot be done in the ctor or setUp. */
+ setName(caseLabel + '-' + getName());
+
+ if (env != null) {
+ try {
+ closeAll();
+ } catch (Throwable e) {
+ System.out.println("During tearDown: " + e);
+ }
+ }
+ envHome = null;
+ env = null;
+ store = null;
+ caseCls = null;
+ caseObj = null;
+ caseLabel = null;
+
+ /* Do not delete log files so they can be used by 2nd phase of test. */
+ }
+
+ /**
+ * @throws FileNotFoundException from DB core.
+ */
+ void openEnv()
+ throws FileNotFoundException, DatabaseException {
+
+ EnvironmentConfig config = TestEnv.TXN.getConfig();
+ config.setAllowCreate(true);
+ env = new Environment(envHome, config);
+ }
+
+ /**
+ * Returns true if the store was opened successfully. Returns false if the
+ * store could not be opened because an exception was expected -- this is
+     * not a test failure, but no further tests for an EntityStore may be run.
+ */
+ private boolean openStore(StoreConfig config)
+ throws Exception {
+
+ config.setTransactional(true);
+ config.setMutations(caseObj.getMutations());
+
+ EntityModel model = new AnnotationModel();
+ config.setModel(model);
+ caseObj.configure(model, config);
+
+ String expectException = caseObj.getStoreOpenException();
+ try {
+ store = new EntityStore(env, EvolveCase.STORE_NAME, config);
+ if (expectException != null) {
+ fail("Expected: " + expectException);
+ }
+ } catch (Exception e) {
+ if (expectException != null) {
+ //e.printStackTrace();
+ String actualMsg = e.getMessage();
+ EvolveCase.checkEquals
+ (expectException,
+ e.getClass().getName() + ": " + actualMsg);
+ return false;
+ } else {
+ throw e;
+ }
+ }
+ return true;
+ }
+
+ boolean openStoreReadOnly()
+ throws Exception {
+
+ StoreConfig config = new StoreConfig();
+ config.setReadOnly(true);
+ return openStore(config);
+ }
+
+ boolean openStoreReadWrite()
+ throws Exception {
+
+ StoreConfig config = new StoreConfig();
+ config.setAllowCreate(true);
+ return openStore(config);
+ }
+
+ void openRawStore()
+ throws DatabaseException {
+
+ StoreConfig config = new StoreConfig();
+ config.setTransactional(true);
+ rawStore = new RawStore(env, EvolveCase.STORE_NAME, config);
+ }
+
+ void closeStore()
+ throws DatabaseException {
+
+ if (store != null) {
+ store.close();
+ store = null;
+ }
+ }
+
+ void openNewStore()
+ throws Exception {
+
+ StoreConfig config = new StoreConfig();
+ config.setAllowCreate(true);
+ config.setTransactional(true);
+
+ EntityModel model = new AnnotationModel();
+ config.setModel(model);
+ caseObj.configure(model, config);
+
+ newStore = new EntityStore(env, "new", config);
+ }
+
+ void closeNewStore()
+ throws DatabaseException {
+
+ if (newStore != null) {
+ newStore.close();
+ newStore = null;
+ }
+ }
+
+ void closeRawStore()
+ throws DatabaseException {
+
+ if (rawStore != null) {
+ rawStore.close();
+ rawStore = null;
+ }
+ }
+
+ void closeEnv()
+ throws DatabaseException {
+
+ if (env != null) {
+ env.close();
+ env = null;
+ }
+ }
+
+ void closeAll()
+ throws DatabaseException {
+
+ closeStore();
+ closeRawStore();
+ closeNewStore();
+ closeEnv();
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveTestInit.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveTestInit.java
new file mode 100644
index 0000000..06ed2a7
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/EvolveTestInit.java
@@ -0,0 +1,53 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+package com.sleepycat.persist.test;
+
+import junit.framework.Test;
+
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * Runs part one of the EvolveTest. This part is run with the old/original
+ * version of EvolveClasses in the classpath. It creates a fresh environment
+ * and store containing instances of the original class. When EvolveTest is
+ * run, it will read/write/evolve these objects from the store created here.
+ *
+ * @author Mark Hayes
+ */
+public class EvolveTestInit extends EvolveTestBase {
+
+ public static Test suite()
+ throws Exception {
+
+ return getSuite(EvolveTestInit.class);
+ }
+
+ @Override
+ boolean useEvolvedClass() {
+ return false;
+ }
+
+ @Override
+ public void setUp() {
+ envHome = getTestInitHome(false /*evolved*/);
+ envHome.mkdirs();
+ SharedTestUtils.emptyDir(envHome);
+ }
+
+ public void testInit()
+ throws Exception {
+
+ openEnv();
+ if (!openStoreReadWrite()) {
+ fail();
+ }
+ caseObj.writeObjects(store);
+ caseObj.checkUnevolvedModel(store.getModel(), env);
+ closeAll();
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/ForeignKeyTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/ForeignKeyTest.java
new file mode 100644
index 0000000..741452c
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/ForeignKeyTest.java
@@ -0,0 +1,329 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.DeleteAction.ABORT;
+import static com.sleepycat.persist.model.DeleteAction.CASCADE;
+import static com.sleepycat.persist.model.DeleteAction.NULLIFY;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+
+import java.util.Enumeration;
+
+import junit.framework.Test;
+import junit.framework.TestSuite;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Transaction;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.DeleteAction;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.util.test.TxnTestCase;
+
+/**
+ * @author Mark Hayes
+ */
+public class ForeignKeyTest extends TxnTestCase {
+
+ private static final DeleteAction[] ACTIONS = {
+ ABORT,
+ NULLIFY,
+ CASCADE,
+ };
+
+ private static final String[] ACTION_LABELS = {
+ "ABORT",
+ "NULLIFY",
+ "CASCADE",
+ };
+
+ static protected Class<?> testClass = ForeignKeyTest.class;
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite();
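+        /*
+         * Cross each DeleteAction with both the Entity2 base class and its
+         * Entity3 subclass (useSubclass).
+         */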
+ for (int i = 0; i < ACTIONS.length; i += 1) {
+ for (int j = 0; j < 2; j++) {
+ TestSuite txnSuite = txnTestSuite(testClass, null, null);
+ Enumeration e = txnSuite.tests();
+ while (e.hasMoreElements()) {
+ ForeignKeyTest test = (ForeignKeyTest) e.nextElement();
+ test.onDelete = ACTIONS[i];
+ test.onDeleteLabel = ACTION_LABELS[i];
+ test.useSubclass = (j == 0);
+ test.useSubclassLabel =
+ (j == 0) ? "UseSubclass" : "UseBaseclass";
+ suite.addTest(test);
+ }
+ }
+ }
+ return suite;
+ }
+
+ private EntityStore store;
+ private PrimaryIndex<String,Entity1> pri1;
+ private PrimaryIndex<String,Entity2> pri2;
+ private SecondaryIndex<String,String,Entity1> sec1;
+ private SecondaryIndex<String,String,Entity2> sec2;
+ private DeleteAction onDelete;
+ private String onDeleteLabel;
+ private boolean useSubclass;
+ private String useSubclassLabel;
+
+ @Override
+ public void tearDown()
+ throws Exception {
+
+ super.tearDown();
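+        /* Tag the test name with the current parameters for reporting. */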
+ setName(getName() + '-' + onDeleteLabel + "-" + useSubclassLabel);
+ }
+
+ private void open()
+ throws DatabaseException {
+
+ StoreConfig config = new StoreConfig();
+ config.setAllowCreate(envConfig.getAllowCreate());
+ config.setTransactional(envConfig.getTransactional());
+
+ store = new EntityStore(env, "test", config);
+
+ pri1 = store.getPrimaryIndex(String.class, Entity1.class);
+ sec1 = store.getSecondaryIndex(pri1, String.class, "sk");
+ pri2 = store.getPrimaryIndex(String.class, Entity2.class);
+ sec2 = store.getSecondaryIndex
+ (pri2, String.class, "sk_" + onDeleteLabel);
+ }
+
+ private void close()
+ throws DatabaseException {
+
+ store.close();
+ }
+
+ public void testForeignKeys()
+ throws Exception {
+
+ open();
+ Transaction txn = txnBegin();
+
+ Entity1 o1 = new Entity1("pk1", "sk1");
+ assertNull(pri1.put(txn, o1));
+
+ assertEquals(o1, pri1.get(txn, "pk1", null));
+ assertEquals(o1, sec1.get(txn, "sk1", null));
+
+ Entity2 o2 = (useSubclass ?
+ new Entity3("pk2", "pk1", onDelete) :
+ new Entity2("pk2", "pk1", onDelete));
+ assertNull(pri2.put(txn, o2));
+
+ assertEquals(o2, pri2.get(txn, "pk2", null));
+ assertEquals(o2, sec2.get(txn, "pk1", null));
+
+ txnCommit(txn);
+ txn = txnBegin();
+
+ /*
+ * pri1 contains o1 with primary key "pk1" and index key "sk1".
+ *
+ * pri2 contains o2 with primary key "pk2" and foreign key "pk1",
+ * which is the primary key of pri1.
+ */
+ if (onDelete == ABORT) {
+
+ /* Test that we abort trying to delete a referenced key. */
+
+ try {
+ pri1.delete(txn, "pk1");
+ fail();
+ } catch (DatabaseException expected) {
+ assertTrue(!DbCompat.NEW_JE_EXCEPTIONS);
+ txnAbort(txn);
+ txn = txnBegin();
+ }
+
+ /*
+ * Test that we can put a record into store2 with a null foreign
+ * key value.
+ */
+ o2 = (useSubclass ?
+ new Entity3("pk2", null, onDelete) :
+ new Entity2("pk2", null, onDelete));
+ assertNotNull(pri2.put(txn, o2));
+ assertEquals(o2, pri2.get(txn, "pk2", null));
+
+ /*
+ * The index2 record should have been deleted since the key was set
+ * to null above.
+ */
+ assertNull(sec2.get(txn, "pk1", null));
+
+ /*
+ * Test that now we can delete the record in store1, since it is no
+ * longer referenced.
+ */
+ assertNotNull(pri1.delete(txn, "pk1"));
+ assertNull(pri1.get(txn, "pk1", null));
+ assertNull(sec1.get(txn, "sk1", null));
+
+ } else if (onDelete == NULLIFY) {
+
+ /* Delete the referenced key. */
+ assertNotNull(pri1.delete(txn, "pk1"));
+ assertNull(pri1.get(txn, "pk1", null));
+ assertNull(sec1.get(txn, "sk1", null));
+
+ /*
+             * The store2 record should still exist, but should have a null
+             * secondary key since it was nullified.
+ */
+ o2 = pri2.get(txn, "pk2", null);
+ assertNotNull(o2);
+ assertEquals("pk2", o2.pk);
+ assertEquals(null, o2.getSk(onDelete));
+
+ } else if (onDelete == CASCADE) {
+
+ /* Delete the referenced key. */
+ assertNotNull(pri1.delete(txn, "pk1"));
+ assertNull(pri1.get(txn, "pk1", null));
+ assertNull(sec1.get(txn, "sk1", null));
+
+            /* The store2 record should have been deleted also. */
+ assertNull(pri2.get(txn, "pk2", null));
+ assertNull(sec2.get(txn, "pk1", null));
+
+ } else {
+ throw new IllegalStateException();
+ }
+
+ /*
+ * Test that a foreign key value may not be used that is not present in
+ * the foreign store. "pk2" is not in store1 in this case.
+ */
+ Entity2 o3 = (useSubclass ?
+ new Entity3("pk3", "pk2", onDelete) :
+ new Entity2("pk3", "pk2", onDelete));
+ try {
+ pri2.put(txn, o3);
+ fail();
+ } catch (DatabaseException expected) {
+ assertTrue(!DbCompat.NEW_JE_EXCEPTIONS);
+ }
+
+ txnAbort(txn);
+ close();
+ }
+
+ @Entity
+ static class Entity1 {
+
+ @PrimaryKey
+ String pk;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ String sk;
+
+ private Entity1() {}
+
+ Entity1(String pk, String sk) {
+ this.pk = pk;
+ this.sk = sk;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ Entity1 o = (Entity1) other;
+ return nullOrEqual(pk, o.pk) &&
+ nullOrEqual(sk, o.sk);
+ }
+ }
+
+ @Entity
+ static class Entity2 {
+
+ @PrimaryKey
+ String pk;
+
+ @SecondaryKey(relate=ONE_TO_ONE, relatedEntity=Entity1.class,
+ onRelatedEntityDelete=ABORT)
+ String sk_ABORT;
+
+ @SecondaryKey(relate=ONE_TO_ONE, relatedEntity=Entity1.class,
+ onRelatedEntityDelete=CASCADE)
+ String sk_CASCADE;
+
+ @SecondaryKey(relate=ONE_TO_ONE, relatedEntity=Entity1.class,
+ onRelatedEntityDelete=NULLIFY)
+ String sk_NULLIFY;
+
+ private Entity2() {}
+
+ Entity2(String pk, String sk, DeleteAction action) {
+ this.pk = pk;
+ switch (action) {
+ case ABORT:
+ sk_ABORT = sk;
+ break;
+ case CASCADE:
+ sk_CASCADE = sk;
+ break;
+ case NULLIFY:
+ sk_NULLIFY = sk;
+ break;
+ default:
+ throw new IllegalArgumentException();
+ }
+ }
+
+ String getSk(DeleteAction action) {
+ switch (action) {
+ case ABORT:
+ return sk_ABORT;
+ case CASCADE:
+ return sk_CASCADE;
+ case NULLIFY:
+ return sk_NULLIFY;
+ default:
+ throw new IllegalArgumentException();
+ }
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ Entity2 o = (Entity2) other;
+ return nullOrEqual(pk, o.pk) &&
+ nullOrEqual(sk_ABORT, o.sk_ABORT) &&
+ nullOrEqual(sk_CASCADE, o.sk_CASCADE) &&
+ nullOrEqual(sk_NULLIFY, o.sk_NULLIFY);
+ }
+ }
+
+ @Persistent
+ static class Entity3 extends Entity2 {
+ Entity3() {}
+
+ Entity3(String pk, String sk, DeleteAction action) {
+ super(pk, sk, action);
+ }
+ }
+
+ static boolean nullOrEqual(Object o1, Object o2) {
+ if (o1 == null) {
+ return o2 == null;
+ } else {
+ return o1.equals(o2);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/IndexTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/IndexTest.java
new file mode 100644
index 0000000..d4478c5
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/IndexTest.java
@@ -0,0 +1,874 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_MANY;
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import junit.framework.Test;
+
+import com.sleepycat.collections.MapEntryParameter;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Transaction;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityIndex;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.persist.raw.RawObject;
+import com.sleepycat.persist.raw.RawStore;
+import com.sleepycat.persist.raw.RawType;
+import com.sleepycat.util.test.TxnTestCase;
+
+/**
+ * Tests EntityIndex and EntityCursor in all their permutations.
+ *
+ * @author Mark Hayes
+ */
+public class IndexTest extends TxnTestCase {
+
+ private static final int N_RECORDS = 5;
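+    /* Modulus used to derive the manyToOne secondary key from the primary key. */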
+ private static final int THREE_TO_ONE = 3;
+
+ static protected Class<?> testClass = IndexTest.class;
+
+ public static Test suite() {
+ return txnTestSuite(testClass, null,
+ null);
+ //new String[] { TxnTestCase.TXN_NULL});
+ }
+
+ private EntityStore store;
+ private PrimaryIndex<Integer,MyEntity> primary;
+ private SecondaryIndex<Integer,Integer,MyEntity> oneToOne;
+ private SecondaryIndex<Integer,Integer,MyEntity> manyToOne;
+ private SecondaryIndex<Integer,Integer,MyEntity> oneToMany;
+ private SecondaryIndex<Integer,Integer,MyEntity> manyToMany;
+ private RawStore rawStore;
+ private RawType entityType;
+ private PrimaryIndex<Object,RawObject> primaryRaw;
+ private SecondaryIndex<Object,Object,RawObject> oneToOneRaw;
+ private SecondaryIndex<Object,Object,RawObject> manyToOneRaw;
+ private SecondaryIndex<Object,Object,RawObject> oneToManyRaw;
+ private SecondaryIndex<Object,Object,RawObject> manyToManyRaw;
+
+ /**
+ * Opens the store.
+ */
+ private void open()
+ throws DatabaseException {
+
+ StoreConfig config = new StoreConfig();
+ config.setAllowCreate(envConfig.getAllowCreate());
+ config.setTransactional(envConfig.getTransactional());
+
+ store = new EntityStore(env, "test", config);
+
+ primary = store.getPrimaryIndex(Integer.class, MyEntity.class);
+ oneToOne =
+ store.getSecondaryIndex(primary, Integer.class, "oneToOne");
+ manyToOne =
+ store.getSecondaryIndex(primary, Integer.class, "manyToOne");
+ oneToMany =
+ store.getSecondaryIndex(primary, Integer.class, "oneToMany");
+ manyToMany =
+ store.getSecondaryIndex(primary, Integer.class, "manyToMany");
+
+ assertNotNull(primary);
+ assertNotNull(oneToOne);
+ assertNotNull(manyToOne);
+ assertNotNull(oneToMany);
+ assertNotNull(manyToMany);
+
+ rawStore = new RawStore(env, "test", config);
+ String clsName = MyEntity.class.getName();
+ entityType = rawStore.getModel().getRawType(clsName);
+ assertNotNull(entityType);
+
+ primaryRaw = rawStore.getPrimaryIndex(clsName);
+ oneToOneRaw = rawStore.getSecondaryIndex(clsName, "oneToOne");
+ manyToOneRaw = rawStore.getSecondaryIndex(clsName, "manyToOne");
+ oneToManyRaw = rawStore.getSecondaryIndex(clsName, "oneToMany");
+ manyToManyRaw = rawStore.getSecondaryIndex(clsName, "manyToMany");
+
+ assertNotNull(primaryRaw);
+ assertNotNull(oneToOneRaw);
+ assertNotNull(manyToOneRaw);
+ assertNotNull(oneToManyRaw);
+ assertNotNull(manyToManyRaw);
+ }
+
+ /**
+ * Closes the store.
+ */
+ private void close()
+ throws DatabaseException {
+
+ store.close();
+ store = null;
+ rawStore.close();
+ rawStore = null;
+ }
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ super.setUp();
+ }
+
+ /**
+ * The store must be closed before closing the environment.
+ */
+ @Override
+ public void tearDown()
+ throws Exception {
+
+ try {
+ if (rawStore != null) {
+ rawStore.close();
+ }
+ } catch (Throwable e) {
+ System.out.println("During tearDown: " + e);
+ }
+ try {
+ if (store != null) {
+ store.close();
+ }
+ } catch (Throwable e) {
+ System.out.println("During tearDown: " + e);
+ }
+ store = null;
+ rawStore = null;
+ super.tearDown();
+ }
+
+ /**
+ * Primary keys: {0, 1, 2, 3, 4}
+ */
+ public void testPrimary()
+ throws DatabaseException {
+
+ SortedMap<Integer,SortedSet<Integer>> expected =
+ new TreeMap<Integer,SortedSet<Integer>>();
+
+ for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+ SortedSet<Integer> values = new TreeSet<Integer>();
+ values.add(priKey);
+ expected.put(priKey, values);
+ }
+
+ open();
+ addEntities(primary);
+ checkIndex(primary, expected, keyGetter, entityGetter);
+ checkIndex(primaryRaw, expected, rawKeyGetter, rawEntityGetter);
+
+ /* Close and reopen, then recheck indices. */
+ close();
+ open();
+ checkIndex(primary, expected, keyGetter, entityGetter);
+ checkIndex(primaryRaw, expected, rawKeyGetter, rawEntityGetter);
+
+ /* Check primary delete, last key first for variety. */
+ for (int priKey = N_RECORDS - 1; priKey >= 0; priKey -= 1) {
+ boolean useRaw = ((priKey & 1) != 0);
+ Transaction txn = txnBegin();
+ if (useRaw) {
+ primaryRaw.delete(txn, priKey);
+ } else {
+ primary.delete(txn, priKey);
+ }
+ txnCommit(txn);
+ expected.remove(priKey);
+ checkIndex(primary, expected, keyGetter, entityGetter);
+ }
+ checkAllEmpty();
+
+ /* Check PrimaryIndex put operations. */
+ MyEntity e;
+ Transaction txn = txnBegin();
+ /* put() */
+ e = primary.put(txn, new MyEntity(1));
+ assertNull(e);
+ e = primary.get(txn, 1, null);
+ assertEquals(1, e.key);
+ /* putNoReturn() */
+ primary.putNoReturn(txn, new MyEntity(2));
+ e = primary.get(txn, 2, null);
+ assertEquals(2, e.key);
+ /* putNoOverwrite */
+ assertTrue(!primary.putNoOverwrite(txn, new MyEntity(1)));
+ assertTrue(!primary.putNoOverwrite(txn, new MyEntity(2)));
+ assertTrue(primary.putNoOverwrite(txn, new MyEntity(3)));
+ e = primary.get(txn, 3, null);
+ assertEquals(3, e.key);
+ txnCommit(txn);
+ close();
+ }
+
+ /**
+ * { 0:0, 1:-1, 2:-2, 3:-3, 4:-4 }
+ */
+ public void testOneToOne()
+ throws DatabaseException {
+
+ SortedMap<Integer,SortedSet<Integer>> expected =
+ new TreeMap<Integer,SortedSet<Integer>>();
+
+ for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+ SortedSet<Integer> values = new TreeSet<Integer>();
+ values.add(priKey);
+ Integer secKey = (-priKey);
+ expected.put(secKey, values);
+ }
+
+ open();
+ addEntities(primary);
+ checkSecondary(oneToOne, oneToOneRaw, expected);
+ checkDelete(oneToOne, oneToOneRaw, expected);
+ close();
+ }
+
+ /**
+ * { 0:0, 1:1, 2:2, 3:0, 4:1 }
+ */
+ public void testManyToOne()
+ throws DatabaseException {
+
+ SortedMap<Integer,SortedSet<Integer>> expected =
+ new TreeMap<Integer,SortedSet<Integer>>();
+
+ for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+ Integer secKey = priKey % THREE_TO_ONE;
+ SortedSet<Integer> values = expected.get(secKey);
+ if (values == null) {
+ values = new TreeSet<Integer>();
+ expected.put(secKey, values);
+ }
+ values.add(priKey);
+ }
+
+ open();
+ addEntities(primary);
+ checkSecondary(manyToOne, manyToOneRaw, expected);
+ checkDelete(manyToOne, manyToOneRaw, expected);
+ close();
+ }
+
+ /**
+     * { 0:{}, 1:{5}, 2:{10,11}, 3:{15,16,17}, 4:{20,21,22,23} }
+ */
+ public void testOneToMany()
+ throws DatabaseException {
+
+ SortedMap<Integer,SortedSet<Integer>> expected =
+ new TreeMap<Integer,SortedSet<Integer>>();
+
+ for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+ for (int i = 0; i < priKey; i += 1) {
+ Integer secKey = (N_RECORDS * priKey) + i;
+ SortedSet<Integer> values = expected.get(secKey);
+ if (values == null) {
+ values = new TreeSet<Integer>();
+ expected.put(secKey, values);
+ }
+ values.add(priKey);
+ }
+ }
+
+ open();
+ addEntities(primary);
+ checkSecondary(oneToMany, oneToManyRaw, expected);
+ checkDelete(oneToMany, oneToManyRaw, expected);
+ close();
+ }
+
+ /**
+     * { 0:{}, 1:{0}, 2:{0,1}, 3:{0,1,2}, 4:{0,1,2,3} }
+ */
+ public void testManyToMany()
+ throws DatabaseException {
+
+ SortedMap<Integer,SortedSet<Integer>> expected =
+ new TreeMap<Integer,SortedSet<Integer>>();
+
+ for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+ for (int i = 0; i < priKey; i += 1) {
+ Integer secKey = i;
+ SortedSet<Integer> values = expected.get(secKey);
+ if (values == null) {
+ values = new TreeSet<Integer>();
+ expected.put(secKey, values);
+ }
+ values.add(priKey);
+ }
+ }
+
+ open();
+ addEntities(primary);
+ checkSecondary(manyToMany, manyToManyRaw, expected);
+ checkDelete(manyToMany, manyToManyRaw, expected);
+ close();
+ }
+
+ private void addEntities(PrimaryIndex<Integer,MyEntity> primary)
+ throws DatabaseException {
+
+ Transaction txn = txnBegin();
+ for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+ MyEntity prev = primary.put(txn, new MyEntity(priKey));
+ assertNull(prev);
+ }
+ txnCommit(txn);
+ }
+
+ private void checkDelete(SecondaryIndex<Integer,Integer,MyEntity> index,
+ SecondaryIndex<Object,Object,RawObject> indexRaw,
+ SortedMap<Integer,SortedSet<Integer>> expected)
+ throws DatabaseException {
+
+ SortedMap<Integer,SortedSet<Integer>> expectedSubIndex =
+ new TreeMap<Integer,SortedSet<Integer>>();
+
+ while (expected.size() > 0) {
+ Integer delSecKey = expected.firstKey();
+ SortedSet<Integer> deletedPriKeys = expected.remove(delSecKey);
+ for (SortedSet<Integer> priKeys : expected.values()) {
+ priKeys.removeAll(deletedPriKeys);
+ }
+ Transaction txn = txnBegin();
+ boolean deleted = index.delete(txn, delSecKey);
+ assertEquals(deleted, !deletedPriKeys.isEmpty());
+ deleted = index.delete(txn, delSecKey);
+ assertTrue(!deleted);
+ assertNull(index.get(txn, delSecKey, null));
+ txnCommit(txn);
+ checkSecondary(index, indexRaw, expected);
+ }
+
+ /*
+ * Delete remaining records so that the primary index is empty. Use
+ * the RawStore for variety.
+ */
+ Transaction txn = txnBegin();
+ for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+ primaryRaw.delete(txn, priKey);
+ }
+ txnCommit(txn);
+ checkAllEmpty();
+ }
+
+ private void checkSecondary(SecondaryIndex<Integer,Integer,MyEntity> index,
+ SecondaryIndex<Object,Object,RawObject>
+ indexRaw,
+ SortedMap<Integer,SortedSet<Integer>> expected)
+ throws DatabaseException {
+
+ checkIndex(index, expected, keyGetter, entityGetter);
+ checkIndex(index.keysIndex(), expected, keyGetter, keyGetter);
+
+ checkIndex(indexRaw, expected, rawKeyGetter, rawEntityGetter);
+ checkIndex(indexRaw.keysIndex(), expected, rawKeyGetter, rawKeyGetter);
+
+ SortedMap<Integer,SortedSet<Integer>> expectedSubIndex =
+ new TreeMap<Integer,SortedSet<Integer>>();
+
+ for (Integer secKey : expected.keySet()) {
+ expectedSubIndex.clear();
+ for (Integer priKey : expected.get(secKey)) {
+ SortedSet<Integer> values = new TreeSet<Integer>();
+ values.add(priKey);
+ expectedSubIndex.put(priKey, values);
+ }
+ checkIndex(index.subIndex(secKey),
+ expectedSubIndex,
+ keyGetter,
+ entityGetter);
+ checkIndex(indexRaw.subIndex(secKey),
+ expectedSubIndex,
+ rawKeyGetter,
+ rawEntityGetter);
+ }
+ }
+
+ private <K,V> void checkIndex(EntityIndex<K,V> index,
+ SortedMap<Integer,SortedSet<Integer>>
+ expected,
+ Getter<K> kGetter,
+ Getter<V> vGetter)
+ throws DatabaseException {
+
+ SortedMap<K,V> map = index.sortedMap();
+
+ Transaction txn = txnBegin();
+ for (int i : expected.keySet()) {
+ K k = kGetter.fromInt(i);
+ SortedSet<Integer> dups = expected.get(i);
+ if (dups.isEmpty()) {
+
+ /* EntityIndex */
+ V v = index.get(txn, k, null);
+ assertNull(v);
+ assertTrue(!index.contains(txn, k, null));
+
+ /* Map/Collection */
+ v = map.get(i);
+ assertNull(v);
+ assertTrue(!map.containsKey(i));
+ } else {
+ int j = dups.first();
+
+ /* EntityIndex */
+ V v = index.get(txn, k, null);
+ assertNotNull(v);
+ assertEquals(j, vGetter.getKey(v));
+ assertTrue(index.contains(txn, k, null));
+
+ /* Map/Collection */
+ v = map.get(i);
+ assertNotNull(v);
+ assertEquals(j, vGetter.getKey(v));
+ assertTrue(map.containsKey(i));
+ assertTrue("" + i + ' ' + j + ' ' + v + ' ' + map,
+ map.containsValue(v));
+ assertTrue(map.keySet().contains(i));
+ assertTrue(map.values().contains(v));
+ assertTrue
+ (map.entrySet().contains(new MapEntryParameter(i, v)));
+ }
+ }
+ txnCommit(txn);
+
+ int keysSize = expandKeySize(expected);
+ int valuesSize = expandValueSize(expected);
+
+ /* EntityIndex.count */
+ assertEquals("keysSize=" + keysSize, valuesSize, index.count());
+
+ /* Map/Collection size */
+ assertEquals(valuesSize, map.size());
+ assertEquals(valuesSize, map.values().size());
+ assertEquals(valuesSize, map.entrySet().size());
+ assertEquals(keysSize, map.keySet().size());
+
+ /* Map/Collection isEmpty */
+ assertEquals(valuesSize == 0, map.isEmpty());
+ assertEquals(valuesSize == 0, map.values().isEmpty());
+ assertEquals(valuesSize == 0, map.entrySet().isEmpty());
+ assertEquals(keysSize == 0, map.keySet().isEmpty());
+
+ txn = txnBeginCursor();
+
+ /* Unconstrained cursors. */
+ checkCursor
+ (index.keys(txn, null),
+ map.keySet(), true,
+ expandKeys(expected), kGetter);
+ checkCursor
+ (index.entities(txn, null),
+ map.values(), false,
+ expandValues(expected), vGetter);
+
+ /* Range cursors. */
+ if (expected.isEmpty()) {
+ checkOpenRanges(txn, 0, index, expected, kGetter, vGetter);
+ checkClosedRanges(txn, 0, 1, index, expected, kGetter, vGetter);
+ } else {
+ int firstKey = expected.firstKey();
+ int lastKey = expected.lastKey();
+ for (int i = firstKey - 1; i <= lastKey + 1; i += 1) {
+ checkOpenRanges(txn, i, index, expected, kGetter, vGetter);
+ int j = i + 1;
+ if (j < lastKey + 1) {
+ checkClosedRanges
+ (txn, i, j, index, expected, kGetter, vGetter);
+ }
+ }
+ }
+
+ txnCommit(txn);
+ }
+
+ private <K,V> void checkOpenRanges(Transaction txn, int i,
+ EntityIndex<K,V> index,
+ SortedMap<Integer,SortedSet<Integer>>
+ expected,
+ Getter<K> kGetter,
+ Getter<V> vGetter)
+ throws DatabaseException {
+
+ SortedMap<K,V> map = index.sortedMap();
+ SortedMap<Integer,SortedSet<Integer>> rangeExpected;
+ K k = kGetter.fromInt(i);
+ K kPlusOne = kGetter.fromInt(i + 1);
+
+ /* Head range exclusive. */
+ rangeExpected = expected.headMap(i);
+ checkCursor
+ (index.keys(txn, null, false, k, false, null),
+ map.headMap(k).keySet(), true,
+ expandKeys(rangeExpected), kGetter);
+ checkCursor
+ (index.entities(txn, null, false, k, false, null),
+ map.headMap(k).values(), false,
+ expandValues(rangeExpected), vGetter);
+
+ /* Head range inclusive. */
+ rangeExpected = expected.headMap(i + 1);
+ checkCursor
+ (index.keys(txn, null, false, k, true, null),
+ map.headMap(kPlusOne).keySet(), true,
+ expandKeys(rangeExpected), kGetter);
+ checkCursor
+ (index.entities(txn, null, false, k, true, null),
+ map.headMap(kPlusOne).values(), false,
+ expandValues(rangeExpected), vGetter);
+
+ /* Tail range exclusive. */
+ rangeExpected = expected.tailMap(i + 1);
+ checkCursor
+ (index.keys(txn, k, false, null, false, null),
+ map.tailMap(kPlusOne).keySet(), true,
+ expandKeys(rangeExpected), kGetter);
+ checkCursor
+ (index.entities(txn, k, false, null, false, null),
+ map.tailMap(kPlusOne).values(), false,
+ expandValues(rangeExpected), vGetter);
+
+ /* Tail range inclusive. */
+ rangeExpected = expected.tailMap(i);
+ checkCursor
+ (index.keys(txn, k, true, null, false, null),
+ map.tailMap(k).keySet(), true,
+ expandKeys(rangeExpected), kGetter);
+ checkCursor
+ (index.entities(txn, k, true, null, false, null),
+ map.tailMap(k).values(), false,
+ expandValues(rangeExpected), vGetter);
+ }
+
+ private <K,V> void checkClosedRanges(Transaction txn, int i, int j,
+ EntityIndex<K,V> index,
+ SortedMap<Integer,SortedSet<Integer>>
+ expected,
+ Getter<K> kGetter,
+ Getter<V> vGetter)
+ throws DatabaseException {
+
+ SortedMap<K,V> map = index.sortedMap();
+ SortedMap<Integer,SortedSet<Integer>> rangeExpected;
+ K k = kGetter.fromInt(i);
+ K kPlusOne = kGetter.fromInt(i + 1);
+ K l = kGetter.fromInt(j);
+ K lPlusOne = kGetter.fromInt(j + 1);
+
+ /* Sub range exclusive. */
+ rangeExpected = expected.subMap(i + 1, j);
+ checkCursor
+ (index.keys(txn, k, false, l, false, null),
+ map.subMap(kPlusOne, l).keySet(), true,
+ expandKeys(rangeExpected), kGetter);
+ checkCursor
+ (index.entities(txn, k, false, l, false, null),
+ map.subMap(kPlusOne, l).values(), false,
+ expandValues(rangeExpected), vGetter);
+
+ /* Sub range inclusive. */
+ rangeExpected = expected.subMap(i, j + 1);
+ checkCursor
+ (index.keys(txn, k, true, l, true, null),
+ map.subMap(k, lPlusOne).keySet(), true,
+ expandKeys(rangeExpected), kGetter);
+ checkCursor
+ (index.entities(txn, k, true, l, true, null),
+ map.subMap(k, lPlusOne).values(), false,
+ expandValues(rangeExpected), vGetter);
+ }
+
+ private List<List<Integer>>
+ expandKeys(SortedMap<Integer,SortedSet<Integer>> map) {
+
+ List<List<Integer>> list = new ArrayList<List<Integer>>();
+ for (Integer key : map.keySet()) {
+ SortedSet<Integer> values = map.get(key);
+ List<Integer> dups = new ArrayList<Integer>();
+ for (int i = 0; i < values.size(); i += 1) {
+ dups.add(key);
+ }
+ list.add(dups);
+ }
+ return list;
+ }
+
+ private List<List<Integer>>
+ expandValues(SortedMap<Integer,SortedSet<Integer>> map) {
+
+ List<List<Integer>> list = new ArrayList<List<Integer>>();
+ for (SortedSet<Integer> values : map.values()) {
+ list.add(new ArrayList<Integer>(values));
+ }
+ return list;
+ }
+
+ private int expandKeySize(SortedMap<Integer,SortedSet<Integer>> map) {
+
+ int size = 0;
+ for (SortedSet<Integer> values : map.values()) {
+ if (values.size() > 0) {
+ size += 1;
+ }
+ }
+ return size;
+ }
+
+ private int expandValueSize(SortedMap<Integer,SortedSet<Integer>> map) {
+
+ int size = 0;
+ for (SortedSet<Integer> values : map.values()) {
+ size += values.size();
+ }
+ return size;
+ }
+
+ private <T> void checkCursor(EntityCursor<T> cursor,
+ Collection<T> collection,
+ boolean collectionIsKeySet,
+ List<List<Integer>> expected,
+ Getter<T> getter)
+ throws DatabaseException {
+
+ boolean first;
+ boolean firstDup;
+ Iterator<T> iterator = collection.iterator();
+
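+        /*
+         * Pass 1: scan forward with next(), cross-checking against the
+         * collection's own iterator when the collection iterates values.
+         */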
+ for (List<Integer> dups : expected) {
+ for (int i : dups) {
+ T o = cursor.next();
+ assertNotNull(o);
+ assertEquals(i, getter.getKey(o));
+ /* Value iterator over duplicates. */
+ if (!collectionIsKeySet) {
+ assertTrue(iterator.hasNext());
+ o = iterator.next();
+ assertNotNull(o);
+ assertEquals(i, getter.getKey(o));
+ }
+ }
+ }
+
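+        /* Pass 2: walk forward again using first(), next() and nextDup(). */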
+ first = true;
+ for (List<Integer> dups : expected) {
+ firstDup = true;
+ for (int i : dups) {
+ T o = first ? cursor.first()
+ : (firstDup ? cursor.next() : cursor.nextDup());
+ assertNotNull(o);
+ assertEquals(i, getter.getKey(o));
+ first = false;
+ firstDup = false;
+ }
+ }
+
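+        /*
+         * Pass 3: visit only distinct keys using first() and nextNoDup(),
+         * cross-checking against the key set iterator.
+         */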
+ first = true;
+ for (List<Integer> dups : expected) {
+ if (!dups.isEmpty()) {
+ int i = dups.get(0);
+ T o = first ? cursor.first() : cursor.nextNoDup();
+ assertNotNull(o);
+ assertEquals(i, getter.getKey(o));
+ /* Key iterator over non-duplicates. */
+ if (collectionIsKeySet) {
+ assertTrue(iterator.hasNext());
+ o = iterator.next();
+ assertNotNull(o);
+ assertEquals(i, getter.getKey(o));
+ }
+ first = false;
+ }
+ }
+
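+
+        /*
+         * Build the expected ordering in reverse; passes 4-6 repeat the same
+         * checks backward using last(), prev(), prevDup() and prevNoDup().
+         */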
+ List<List<Integer>> reversed = new ArrayList<List<Integer>>();
+ for (List<Integer> dups : expected) {
+ ArrayList<Integer> reversedDups = new ArrayList<Integer>(dups);
+ Collections.reverse(reversedDups);
+ reversed.add(reversedDups);
+ }
+ Collections.reverse(reversed);
+
+ first = true;
+ for (List<Integer> dups : reversed) {
+ for (int i : dups) {
+ T o = first ? cursor.last() : cursor.prev();
+ assertNotNull(o);
+ assertEquals(i, getter.getKey(o));
+ first = false;
+ }
+ }
+
+ first = true;
+ for (List<Integer> dups : reversed) {
+ firstDup = true;
+ for (int i : dups) {
+ T o = first ? cursor.last()
+ : (firstDup ? cursor.prev() : cursor.prevDup());
+ assertNotNull(o);
+ assertEquals(i, getter.getKey(o));
+ first = false;
+ firstDup = false;
+ }
+ }
+
+ first = true;
+ for (List<Integer> dups : reversed) {
+ if (!dups.isEmpty()) {
+ int i = dups.get(0);
+ T o = first ? cursor.last() : cursor.prevNoDup();
+ assertNotNull(o);
+ assertEquals(i, getter.getKey(o));
+ first = false;
+ }
+ }
+
+ cursor.close();
+ }
+
+ private void checkAllEmpty()
+ throws DatabaseException {
+
+ checkEmpty(primary);
+ checkEmpty(oneToOne);
+ checkEmpty(oneToMany);
+ checkEmpty(manyToOne);
+ checkEmpty(manyToMany);
+ }
+
+ private <K,V> void checkEmpty(EntityIndex<K,V> index)
+ throws DatabaseException {
+
+ EntityCursor<K> keys = index.keys();
+ assertNull(keys.next());
+ assertTrue(!keys.iterator().hasNext());
+ keys.close();
+ EntityCursor<V> entities = index.entities();
+ assertNull(entities.next());
+ assertTrue(!entities.iterator().hasNext());
+ entities.close();
+ }
+
+ private interface Getter<T> {
+ int getKey(T o);
+ T fromInt(int i);
+ }
+
+ private static Getter<MyEntity> entityGetter =
+ new Getter<MyEntity>() {
+ public int getKey(MyEntity o) {
+ return o.key;
+ }
+ public MyEntity fromInt(int i) {
+ throw new UnsupportedOperationException();
+ }
+ };
+
+ private static Getter<Integer> keyGetter =
+ new Getter<Integer>() {
+ public int getKey(Integer o) {
+ return o;
+ }
+ public Integer fromInt(int i) {
+ return Integer.valueOf(i);
+ }
+ };
+
+ private static Getter<RawObject> rawEntityGetter =
+ new Getter<RawObject>() {
+ public int getKey(RawObject o) {
+ Object val = o.getValues().get("key");
+ return ((Integer) val).intValue();
+ }
+ public RawObject fromInt(int i) {
+ throw new UnsupportedOperationException();
+ }
+ };
+
+ private static Getter<Object> rawKeyGetter =
+ new Getter<Object>() {
+ public int getKey(Object o) {
+ return ((Integer) o).intValue();
+ }
+ public Object fromInt(int i) {
+ return Integer.valueOf(i);
+ }
+ };
+
+ @Entity
+ private static class MyEntity {
+
+ @PrimaryKey
+ private int key;
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ private int oneToOne;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private int manyToOne;
+
+ @SecondaryKey(relate=ONE_TO_MANY)
+ private Set<Integer> oneToMany = new TreeSet<Integer>();
+
+ @SecondaryKey(relate=MANY_TO_MANY)
+ private Set<Integer> manyToMany = new TreeSet<Integer>();
+
+ private MyEntity() {}
+
+ private MyEntity(int key) {
+
+ /* example keys: {0, 1, 2, 3, 4} */
+ this.key = key;
+
+ /* { 0:0, 1:-1, 2:-2, 3:-3, 4:-4 } */
+ oneToOne = -key;
+
+ /* { 0:0, 1:1, 2:2, 3:0, 4:1 } */
+ manyToOne = key % THREE_TO_ONE;
+
+            /* { 0:{}, 1:{5}, 2:{10,11}, 3:{15,16,17}, 4:{20,21,22,23} } */
+ for (int i = 0; i < key; i += 1) {
+ oneToMany.add((N_RECORDS * key) + i);
+ }
+
+            /* { 0:{}, 1:{0}, 2:{0,1}, 3:{0,1,2}, 4:{0,1,2,3} } */
+ for (int i = 0; i < key; i += 1) {
+ manyToMany.add(i);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "MyEntity " + key;
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/JoinTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/JoinTest.java
new file mode 100644
index 0000000..b77d37d
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/JoinTest.java
@@ -0,0 +1,176 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.Test;
+
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Transaction;
+import com.sleepycat.persist.EntityJoin;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.ForwardCursor;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.util.test.TxnTestCase;
+
+/**
+ * @author Mark Hayes
+ */
+public class JoinTest extends TxnTestCase {
+
+ private static final int N_RECORDS = 5;
+
+ static protected Class<?> testClass = JoinTest.class;
+
+ public static Test suite() {
+ return txnTestSuite(testClass, null, null);
+ }
+
+ private EntityStore store;
+ private PrimaryIndex<Integer,MyEntity> primary;
+ private SecondaryIndex<Integer,Integer,MyEntity> sec1;
+ private SecondaryIndex<Integer,Integer,MyEntity> sec2;
+ private SecondaryIndex<Integer,Integer,MyEntity> sec3;
+
+ /**
+ * Opens the store.
+ */
+ private void open()
+ throws DatabaseException {
+
+ StoreConfig config = new StoreConfig();
+ config.setAllowCreate(envConfig.getAllowCreate());
+ config.setTransactional(envConfig.getTransactional());
+
+ store = new EntityStore(env, "test", config);
+
+ primary = store.getPrimaryIndex(Integer.class, MyEntity.class);
+ sec1 = store.getSecondaryIndex(primary, Integer.class, "k1");
+ sec2 = store.getSecondaryIndex(primary, Integer.class, "k2");
+ sec3 = store.getSecondaryIndex(primary, Integer.class, "k3");
+ }
+
+ /**
+ * Closes the store.
+ */
+ private void close()
+ throws DatabaseException {
+
+ store.close();
+ }
+
+ public void testJoin()
+ throws DatabaseException {
+
+ open();
+
+ /*
+ * Primary keys: { 0, 1, 2, 3, 4 }
+ * Secondary k1: { 0:0, 0:1, 0:2, 0:3, 0:4 }
+ * Secondary k2: { 0:0, 1:1, 0:2, 1:3, 0:4 }
+ * Secondary k3: { 0:0, 1:1, 2:2, 0:3, 1:4 }
+ */
+ Transaction txn = txnBegin();
+ for (int i = 0; i < N_RECORDS; i += 1) {
+ MyEntity e = new MyEntity(i, 0, i % 2, i % 3);
+ boolean ok = primary.putNoOverwrite(txn, e);
+ assertTrue(ok);
+ }
+ txnCommit(txn);
+
+ /*
+ * k1, k2, k3, -> { primary keys }
+ * -1 means don't include the key in the join.
+ */
+ doJoin( 0, 0, 0, new int[] { 0 });
+ doJoin( 0, 0, 1, new int[] { 4 });
+ doJoin( 0, 0, -1, new int[] { 0, 2, 4 });
+ doJoin(-1, 1, 1, new int[] { 1 });
+ doJoin(-1, 2, 2, new int[] { });
+ doJoin(-1, -1, 2, new int[] { 2 });
+
+ close();
+ }
+
+ private void doJoin(int k1, int k2, int k3, int[] expectKeys)
+ throws DatabaseException {
+
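+        /* A negative key value means that index is left out of the join. */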
+ List<Integer> expect = new ArrayList<Integer>();
+ for (int i : expectKeys) {
+ expect.add(i);
+ }
+ EntityJoin join = new EntityJoin(primary);
+ if (k1 >= 0) {
+ join.addCondition(sec1, k1);
+ }
+ if (k2 >= 0) {
+ join.addCondition(sec2, k2);
+ }
+ if (k3 >= 0) {
+ join.addCondition(sec3, k3);
+ }
+ List<Integer> found;
+ Transaction txn = txnBegin();
+
+ /* Keys */
+ found = new ArrayList<Integer>();
+ ForwardCursor<Integer> keys = join.keys(txn, null);
+ for (int i : keys) {
+ found.add(i);
+ }
+ keys.close();
+ assertEquals(expect, found);
+
+ /* Entities */
+ found = new ArrayList<Integer>();
+ ForwardCursor<MyEntity> entities = join.entities(txn, null);
+ for (MyEntity e : entities) {
+ found.add(e.id);
+ }
+ entities.close();
+ assertEquals(expect, found);
+
+ txnCommit(txn);
+ }
+
+ @Entity
+ private static class MyEntity {
+ @PrimaryKey
+ int id;
+ @SecondaryKey(relate=MANY_TO_ONE)
+ int k1;
+ @SecondaryKey(relate=MANY_TO_ONE)
+ int k2;
+ @SecondaryKey(relate=MANY_TO_ONE)
+ int k3;
+
+ private MyEntity() {}
+
+ MyEntity(int id, int k1, int k2, int k3) {
+ this.id = id;
+ this.k1 = k1;
+ this.k2 = k2;
+ this.k3 = k3;
+ }
+
+ @Override
+ public String toString() {
+ return "MyEntity " + id + ' ' + k1 + ' ' + k2 + ' ' + k3;
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/NegativeTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/NegativeTest.java
new file mode 100644
index 0000000..5503451
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/NegativeTest.java
@@ -0,0 +1,644 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+import static com.sleepycat.persist.model.DeleteAction.NULLIFY;
+
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.Collection;
+
+import junit.framework.Test;
+
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.AnnotationModel;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PersistentProxy;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.util.test.TxnTestCase;
+
+/**
+ * Negative tests.
+ *
+ * @author Mark Hayes
+ */
+public class NegativeTest extends TxnTestCase {
+
+ static protected Class<?> testClass = NegativeTest.class;
+
+ public static Test suite() {
+ return txnTestSuite(testClass, null, null);
+ }
+
+ private EntityStore store;
+
+ private void open()
+ throws DatabaseException {
+
+ open(null);
+ }
+
+ private void open(Class<ProxyExtendsEntity> clsToRegister)
+ throws DatabaseException {
+
+ StoreConfig config = new StoreConfig();
+ config.setAllowCreate(envConfig.getAllowCreate());
+ config.setTransactional(envConfig.getTransactional());
+
+ if (clsToRegister != null) {
+ AnnotationModel model = new AnnotationModel();
+ model.registerClass(clsToRegister);
+ config.setModel(model);
+ }
+
+ store = new EntityStore(env, "test", config);
+ }
+
+ private void close()
+ throws DatabaseException {
+
+ store.close();
+ store = null;
+ }
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ super.setUp();
+ }
+
+ @Override
+ public void tearDown()
+ throws Exception {
+
+ if (store != null) {
+ try {
+ store.close();
+ } catch (Throwable e) {
+ System.out.println("tearDown: " + e);
+ }
+ store = null;
+ }
+ super.tearDown();
+ }
+
+ public void testBadKeyClass1()
+ throws DatabaseException {
+
+ open();
+ try {
+ store.getPrimaryIndex(BadKeyClass1.class, UseBadKeyClass1.class);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ assertTrue(expected.getMessage().indexOf("@KeyField") >= 0);
+ }
+ close();
+ }
+
+ /** Missing @KeyField in composite key class. */
+ @Persistent
+ static class BadKeyClass1 {
+
+ private int f1;
+ }
+
+ @Entity
+ static class UseBadKeyClass1 {
+
+ @PrimaryKey
+ private BadKeyClass1 f1 = new BadKeyClass1();
+
+ @SecondaryKey(relate=ONE_TO_ONE)
+ private BadKeyClass1 f2 = new BadKeyClass1();
+ }
+
+ public void testBadSequenceKeys()
+ throws DatabaseException {
+
+ open();
+ try {
+ store.getPrimaryIndex(Boolean.class, BadSequenceKeyEntity1.class);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ assertTrue(expected.getMessage().indexOf
+ ("Type not allowed for sequence") >= 0);
+ }
+ try {
+ store.getPrimaryIndex(BadSequenceKeyEntity2.Key.class,
+ BadSequenceKeyEntity2.class);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ assertTrue(expected.getMessage().indexOf
+ ("Type not allowed for sequence") >= 0);
+ }
+ try {
+ store.getPrimaryIndex(BadSequenceKeyEntity3.Key.class,
+ BadSequenceKeyEntity3.class);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ assertTrue(expected.getMessage().indexOf
+ ("A composite key class used with a sequence may contain " +
+ "only a single integer key field")>= 0);
+ }
+ close();
+ }
+
+ /** Boolean not allowed for sequence key. */
+ @Entity
+ static class BadSequenceKeyEntity1 {
+
+ @PrimaryKey(sequence="X")
+ private boolean key;
+ }
+
+ /** Composite key with non-integer field not allowed for sequence key. */
+ @Entity
+ static class BadSequenceKeyEntity2 {
+
+ @PrimaryKey(sequence="X")
+ private Key key;
+
+ @Persistent
+ static class Key {
+ @KeyField(1)
+ boolean key;
+ }
+ }
+
+ /** Composite key with multiple key fields not allowed for sequence key. */
+ @Entity
+ static class BadSequenceKeyEntity3 {
+
+ @PrimaryKey(sequence="X")
+ private Key key;
+
+ @Persistent
+ static class Key {
+ @KeyField(1)
+ int key;
+ @KeyField(2)
+ int key2;
+ }
+ }
+
+ /**
+     * A proxied object may not currently contain a field that references
+     * the parent proxy. [#15815]
+ */
+ public void testProxyNestedRef()
+ throws DatabaseException {
+
+ open();
+ PrimaryIndex<Integer,ProxyNestedRef> index = store.getPrimaryIndex
+ (Integer.class, ProxyNestedRef.class);
+ ProxyNestedRef entity = new ProxyNestedRef();
+ entity.list.add(entity.list);
+ try {
+ index.put(entity);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ assertTrue(expected.getMessage().indexOf
+ ("Cannot embed a reference to a proxied object") >= 0);
+ }
+ close();
+ }
+
+ @Entity
+ static class ProxyNestedRef {
+
+ @PrimaryKey
+ private int key;
+
+ ArrayList<Object> list = new ArrayList<Object>();
+ }
+
+ /**
+ * Disallow primary keys on entity subclasses. [#15757]
+ */
+ public void testEntitySubclassWithPrimaryKey()
+ throws DatabaseException {
+
+ open();
+ PrimaryIndex<Integer,EntitySuperClass> index = store.getPrimaryIndex
+ (Integer.class, EntitySuperClass.class);
+ EntitySuperClass e1 = new EntitySuperClass(1, "one");
+ index.put(e1);
+ assertEquals(e1, index.get(1));
+ EntitySubClass e2 = new EntitySubClass(2, "two", "foo", 9);
+ try {
+ index.put(e2);
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertTrue(e.getMessage().contains
+ ("PrimaryKey may not appear on an Entity subclass"));
+ }
+ assertEquals(e1, index.get(1));
+ close();
+ }
+
+ @Entity
+ static class EntitySuperClass {
+
+ @PrimaryKey
+ private int x;
+
+ private String y;
+
+ EntitySuperClass(int x, String y) {
+ assert y != null;
+ this.x = x;
+ this.y = y;
+ }
+
+ private EntitySuperClass() {}
+
+ @Override
+ public String toString() {
+ return "x=" + x + " y=" + y;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other instanceof EntitySuperClass) {
+ EntitySuperClass o = (EntitySuperClass) other;
+ return x == o.x && y.equals(o.y);
+ } else {
+ return false;
+ }
+ }
+ }
+
+ @Persistent
+ static class EntitySubClass extends EntitySuperClass {
+
+ @PrimaryKey
+ private String foo;
+
+ private int z;
+
+ EntitySubClass(int x, String y, String foo, int z) {
+ super(x, y);
+ assert foo != null;
+ this.foo = foo;
+ this.z = z;
+ }
+
+ private EntitySubClass() {}
+
+ @Override
+ public String toString() {
+ return super.toString() + " z=" + z;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other instanceof EntitySubClass) {
+ EntitySubClass o = (EntitySubClass) other;
+ return super.equals(o) && z == o.z;
+ } else {
+ return false;
+ }
+ }
+ }
+
+ /**
+ * Disallow embedded entity classes and subclasses. [#16077]
+ */
+ public void testEmbeddedEntity()
+ throws DatabaseException {
+
+ open();
+ PrimaryIndex<Integer,EmbeddingEntity> index = store.getPrimaryIndex
+ (Integer.class, EmbeddingEntity.class);
+ EmbeddingEntity e1 = new EmbeddingEntity(1, null);
+ index.put(e1);
+ assertEquals(e1, index.get(1));
+
+ EmbeddingEntity e2 =
+ new EmbeddingEntity(2, new EntitySuperClass(2, "two"));
+ try {
+ index.put(e2);
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertTrue(e.getMessage().contains
+ ("References to entities are not allowed"));
+ }
+
+ EmbeddingEntity e3 = new EmbeddingEntity
+ (3, new EmbeddedEntitySubClass(3, "three", "foo", 9));
+ try {
+ index.put(e3);
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertTrue(e.toString(), e.getMessage().contains
+ ("References to entities are not allowed"));
+ }
+
+ assertEquals(e1, index.get(1));
+ close();
+ }
+
+ @Entity
+ static class EmbeddingEntity {
+
+ @PrimaryKey
+ private int x;
+
+ private EntitySuperClass y;
+
+ EmbeddingEntity(int x, EntitySuperClass y) {
+ this.x = x;
+ this.y = y;
+ }
+
+ private EmbeddingEntity() {}
+
+ @Override
+ public String toString() {
+ return "x=" + x + " y=" + y;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other instanceof EmbeddingEntity) {
+ EmbeddingEntity o = (EmbeddingEntity) other;
+ return x == o.x &&
+ ((y == null) ? (o.y == null) : y.equals(o.y));
+ } else {
+ return false;
+ }
+ }
+ }
+
+ @Persistent
+ static class EmbeddedEntitySubClass extends EntitySuperClass {
+
+ private String foo;
+
+ private int z;
+
+ EmbeddedEntitySubClass(int x, String y, String foo, int z) {
+ super(x, y);
+ assert foo != null;
+ this.foo = foo;
+ this.z = z;
+ }
+
+ private EmbeddedEntitySubClass() {}
+
+ @Override
+ public String toString() {
+ return super.toString() + " z=" + z;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other instanceof EmbeddedEntitySubClass) {
+ EmbeddedEntitySubClass o = (EmbeddedEntitySubClass) other;
+ return super.equals(o) && z == o.z;
+ } else {
+ return false;
+ }
+ }
+ }
+
+ /**
+ * Disallow SecondaryKey collection with no type parameter. [#15950]
+ */
+ public void testTypelessKeyCollection()
+ throws DatabaseException {
+
+ open();
+ try {
+ store.getPrimaryIndex
+ (Integer.class, TypelessKeyCollectionEntity.class);
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertTrue(e.toString(), e.getMessage().contains
+ ("Collection typed secondary key field must have a " +
+ "single generic type argument and a wildcard or type " +
+ "bound is not allowed"));
+ }
+ close();
+ }
+
+ @Entity
+ static class TypelessKeyCollectionEntity {
+
+ @PrimaryKey
+ private int x;
+
+ @SecondaryKey(relate=ONE_TO_MANY)
+ private Collection keys = new ArrayList();
+
+ TypelessKeyCollectionEntity(int x) {
+ this.x = x;
+ }
+
+ private TypelessKeyCollectionEntity() {}
+ }
+
+ /**
+ * Disallow a persistent proxy that extends an entity. [#15950]
+ */
+ public void testProxyEntity()
+ throws DatabaseException {
+
+ try {
+ open(ProxyExtendsEntity.class);
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertTrue(e.toString(), e.getMessage().contains
+ ("A proxy may not be an entity"));
+ }
+ }
+
+ @Persistent(proxyFor=BigDecimal.class)
+ static class ProxyExtendsEntity
+ extends EntitySuperClass
+ implements PersistentProxy<BigDecimal> {
+
+ private String rep;
+
+ public BigDecimal convertProxy() {
+ return new BigDecimal(rep);
+ }
+
+ public void initializeProxy(BigDecimal o) {
+ rep = o.toString();
+ }
+ }
+
+ /**
+ * Wrapper type not allowed for nullified foreign key.
+ */
+ public void testBadNullifyKey()
+ throws DatabaseException {
+
+ open();
+ try {
+ store.getPrimaryIndex(Integer.class, BadNullifyKeyEntity1.class);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ assertTrue(expected.getMessage().indexOf
+ ("NULLIFY may not be used with primitive fields") >= 0);
+ }
+ close();
+ }
+
+ @Entity
+ static class BadNullifyKeyEntity1 {
+
+ @PrimaryKey
+ private int key;
+
+ @SecondaryKey(relate=ONE_TO_ONE,
+ relatedEntity=BadNullifyKeyEntity2.class,
+ onRelatedEntityDelete=NULLIFY)
+ private int secKey; // Should be Integer, not int.
+ }
+
+ @Entity
+ static class BadNullifyKeyEntity2 {
+
+ @PrimaryKey
+ private int key;
+ }
+
+ /**
+ * @Persistent not allowed on an enum.
+ */
+ public void testPersistentEnum()
+ throws DatabaseException {
+
+ open();
+ try {
+ store.getPrimaryIndex(Integer.class, PersistentEnumEntity.class);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ assertTrue(expected.getMessage().indexOf
+ ("not allowed for enum, interface, or primitive") >= 0);
+ }
+ close();
+ }
+
+ @Entity
+ static class PersistentEnumEntity {
+
+ @PrimaryKey
+ private int key;
+
+ @Persistent
+ enum MyEnum {X, Y, Z};
+
+ MyEnum f1;
+ }
+
+ /**
+ * Disallow a reference to an interface marked @Persistent.
+ */
+ public void testPersistentInterface()
+ throws DatabaseException {
+
+ open();
+ try {
+ store.getPrimaryIndex(Integer.class,
+ PersistentInterfaceEntity1.class);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ assertTrue(expected.getMessage().indexOf
+ ("not allowed for enum, interface, or primitive") >= 0);
+ }
+ close();
+ }
+
+ @Entity
+ static class PersistentInterfaceEntity1 {
+
+ @PrimaryKey
+ private int key;
+
+ @SecondaryKey(relate=ONE_TO_ONE,
+ relatedEntity=PersistentInterfaceEntity2.class)
+ private int secKey; // Should be Integer, not int.
+ }
+
+ @Persistent
+ interface PersistentInterfaceEntity2 {
+ }
+
+ /**
+ * Disallow reference to @Persistent inner class.
+ */
+ public void testPersistentInnerClass()
+ throws DatabaseException {
+
+ open();
+ try {
+ store.getPrimaryIndex(Integer.class,
+ PersistentInnerClassEntity1.class);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ assertTrue(expected.getMessage().indexOf
+ ("Inner classes not allowed") >= 0);
+ }
+ close();
+ }
+
+ @Entity
+ static class PersistentInnerClassEntity1 {
+
+ @PrimaryKey
+ private int key;
+
+ private PersistentInnerClass f;
+ }
+
+ /* An inner (non-static) class is illegal. */
+ @Persistent
+ class PersistentInnerClass {
+
+ private int x;
+ }
+
+ /**
+ * Disallow @Entity inner class.
+ */
+ public void testEntityInnerClass()
+ throws DatabaseException {
+
+ open();
+ try {
+ store.getPrimaryIndex(Integer.class,
+ EntityInnerClassEntity.class);
+ fail();
+ } catch (IllegalArgumentException expected) {
+ assertTrue(expected.getMessage().indexOf
+ ("Inner classes not allowed") >= 0);
+ }
+ close();
+ }
+
+ /* An inner (non-static) class is illegal. */
+ @Entity
+ class EntityInnerClassEntity {
+
+ @PrimaryKey
+ private int key;
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/OperationTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/OperationTest.java
new file mode 100644
index 0000000..4fc81fc
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/OperationTest.java
@@ -0,0 +1,1552 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.DeleteAction.CASCADE;
+import static com.sleepycat.persist.model.DeleteAction.NULLIFY;
+import static com.sleepycat.persist.model.Relationship.MANY_TO_MANY;
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import junit.framework.Test;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.Database;
+import com.sleepycat.db.DatabaseConfig;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.StatsConfig;
+import com.sleepycat.db.Transaction;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityIndex;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.impl.Store;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.NotPersistent;
+import com.sleepycat.persist.model.NotTransient;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.persist.raw.RawStore;
+import com.sleepycat.util.test.TxnTestCase;
+
+/**
+ * Tests misc store and index operations that are not tested by IndexTest.
+ *
+ * @author Mark Hayes
+ */
+public class OperationTest extends TxnTestCase {
+
+ private static final String STORE_NAME = "test";
+
+ static protected Class<?> testClass = OperationTest.class;
+
+ public static Test suite() {
+ return txnTestSuite(testClass, null, null);
+ }
+
+ private EntityStore store;
+
+ private void openReadOnly()
+ throws DatabaseException {
+
+ StoreConfig config = new StoreConfig();
+ config.setReadOnly(true);
+ open(config);
+ }
+
+ private void open()
+ throws DatabaseException {
+
+ open((Class) null);
+ }
+
+ private void open(Class clsToRegister)
+ throws DatabaseException {
+
+ StoreConfig config = new StoreConfig();
+ config.setAllowCreate(envConfig.getAllowCreate());
+ if (clsToRegister != null) {
+ com.sleepycat.persist.model.EntityModel model =
+ new com.sleepycat.persist.model.AnnotationModel();
+ model.registerClass(clsToRegister);
+ config.setModel(model);
+ }
+ open(config);
+ }
+
+ private void open(StoreConfig config)
+ throws DatabaseException {
+
+ config.setTransactional(envConfig.getTransactional());
+ store = new EntityStore(env, STORE_NAME, config);
+ }
+
+ private void close()
+ throws DatabaseException {
+
+ store.close();
+ store = null;
+ }
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ super.setUp();
+ }
+
+ /**
+ * The store must be closed before closing the environment.
+ */
+ @Override
+ public void tearDown()
+ throws Exception {
+
+ try {
+ if (store != null) {
+ store.close();
+ }
+ } catch (Throwable e) {
+ System.out.println("During tearDown: " + e);
+ }
+ store = null;
+ super.tearDown();
+ }
+
+ public void testReadOnly()
+ throws DatabaseException {
+
+ open();
+ PrimaryIndex<Integer,SharedSequenceEntity1> priIndex =
+ store.getPrimaryIndex(Integer.class, SharedSequenceEntity1.class);
+ Transaction txn = txnBegin();
+ SharedSequenceEntity1 e = new SharedSequenceEntity1();
+ priIndex.put(txn, e);
+ assertEquals(1, e.key);
+ txnCommit(txn);
+ close();
+
+ /*
+ * Check that we can open the store read-only and read the records
+ * written above.
+ */
+ openReadOnly();
+ priIndex =
+ store.getPrimaryIndex(Integer.class, SharedSequenceEntity1.class);
+ e = priIndex.get(1);
+ assertNotNull(e);
+ close();
+ }
+
+
+ public void testUninitializedCursor()
+ throws DatabaseException {
+
+ open();
+
+ PrimaryIndex<Integer,MyEntity> priIndex =
+ store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+ Transaction txn = txnBeginCursor();
+
+ MyEntity e = new MyEntity();
+ e.priKey = 1;
+ e.secKey = 1;
+ priIndex.put(txn, e);
+
+ EntityCursor<MyEntity> entities = priIndex.entities(txn, null);
+ try {
+ entities.nextDup();
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ entities.prevDup();
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ entities.current();
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ entities.delete();
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ entities.update(e);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ entities.count();
+ fail();
+ } catch (IllegalStateException expected) {}
+
+ entities.close();
+ txnCommit(txn);
+ close();
+ }
+
+ public void testCursorCount()
+ throws DatabaseException {
+
+ open();
+
+ PrimaryIndex<Integer,MyEntity> priIndex =
+ store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+ SecondaryIndex<Integer,Integer,MyEntity> secIndex =
+ store.getSecondaryIndex(priIndex, Integer.class, "secKey");
+
+ Transaction txn = txnBeginCursor();
+
+ MyEntity e = new MyEntity();
+ e.priKey = 1;
+ e.secKey = 1;
+ priIndex.put(txn, e);
+
+ EntityCursor<MyEntity> cursor = secIndex.entities(txn, null);
+ cursor.next();
+ assertEquals(1, cursor.count());
+ cursor.close();
+
+ e.priKey = 2;
+ priIndex.put(txn, e);
+ cursor = secIndex.entities(txn, null);
+ cursor.next();
+ assertEquals(2, cursor.count());
+ cursor.close();
+
+ txnCommit(txn);
+ close();
+ }
+
+ public void testCursorUpdate()
+ throws DatabaseException {
+
+ open();
+
+ PrimaryIndex<Integer,MyEntity> priIndex =
+ store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+ SecondaryIndex<Integer,Integer,MyEntity> secIndex =
+ store.getSecondaryIndex(priIndex, Integer.class, "secKey");
+
+ Transaction txn = txnBeginCursor();
+
+ Integer k;
+ MyEntity e = new MyEntity();
+ e.priKey = 1;
+ e.secKey = 2;
+ priIndex.put(txn, e);
+
+ /* update() with primary entity cursor. */
+ EntityCursor<MyEntity> entities = priIndex.entities(txn, null);
+ e = entities.next();
+ assertNotNull(e);
+ assertEquals(1, e.priKey);
+ assertEquals(Integer.valueOf(2), e.secKey);
+ e.secKey = null;
+ assertTrue(entities.update(e));
+ e = entities.current();
+ assertNotNull(e);
+ assertEquals(1, e.priKey);
+ assertEquals(null, e.secKey);
+ e.secKey = 3;
+ assertTrue(entities.update(e));
+ e = entities.current();
+ assertNotNull(e);
+ assertEquals(1, e.priKey);
+ assertEquals(Integer.valueOf(3), e.secKey);
+ entities.close();
+
+ /* update() with primary keys cursor. */
+ EntityCursor<Integer> keys = priIndex.keys(txn, null);
+ k = keys.next();
+ assertNotNull(k);
+ assertEquals(Integer.valueOf(1), k);
+ try {
+ keys.update(2);
+ fail();
+ } catch (UnsupportedOperationException expected) {
+ }
+ keys.close();
+
+ /* update() with secondary entity cursor. */
+ entities = secIndex.entities(txn, null);
+ e = entities.next();
+ assertNotNull(e);
+ assertEquals(1, e.priKey);
+ assertEquals(Integer.valueOf(3), e.secKey);
+ try {
+ entities.update(e);
+ fail();
+ } catch (UnsupportedOperationException expected) {
+ } catch (IllegalArgumentException expectedForDbCore) {
+ }
+ entities.close();
+
+ /* update() with secondary keys cursor. */
+ keys = secIndex.keys(txn, null);
+ k = keys.next();
+ assertNotNull(k);
+ assertEquals(Integer.valueOf(3), k);
+ try {
+ keys.update(k);
+ fail();
+ } catch (UnsupportedOperationException expected) {
+ }
+ keys.close();
+
+ txnCommit(txn);
+ close();
+ }
+
+ public void testCursorDelete()
+ throws DatabaseException {
+
+ open();
+
+ PrimaryIndex<Integer,MyEntity> priIndex =
+ store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+ SecondaryIndex<Integer,Integer,MyEntity> secIndex =
+ store.getSecondaryIndex(priIndex, Integer.class, "secKey");
+
+ Transaction txn = txnBeginCursor();
+
+ /* delete() with primary and secondary entities cursor. */
+
+ for (EntityIndex index : new EntityIndex[] { priIndex, secIndex }) {
+
+ MyEntity e = new MyEntity();
+ e.priKey = 1;
+ e.secKey = 1;
+ priIndex.put(txn, e);
+ e.priKey = 2;
+ priIndex.put(txn, e);
+
+ EntityCursor<MyEntity> cursor = index.entities(txn, null);
+
+ e = cursor.next();
+ assertNotNull(e);
+ assertEquals(1, e.priKey);
+ e = cursor.current();
+ assertNotNull(e);
+ assertEquals(1, e.priKey);
+ assertTrue(cursor.delete());
+ assertTrue(!cursor.delete());
+ assertNull(cursor.current());
+
+ e = cursor.next();
+ assertNotNull(e);
+ assertEquals(2, e.priKey);
+ e = cursor.current();
+ assertNotNull(e);
+ assertEquals(2, e.priKey);
+ assertTrue(cursor.delete());
+ assertTrue(!cursor.delete());
+ assertNull(cursor.current());
+
+ e = cursor.next();
+ assertNull(e);
+
+ if (index == priIndex) {
+ e = new MyEntity();
+ e.priKey = 2;
+ e.secKey = 1;
+ assertTrue(!cursor.update(e));
+ }
+
+ cursor.close();
+ }
+
+ /* delete() with primary and secondary keys cursor. */
+
+ for (EntityIndex index : new EntityIndex[] { priIndex, secIndex }) {
+
+ MyEntity e = new MyEntity();
+ e.priKey = 1;
+ e.secKey = 1;
+ priIndex.put(txn, e);
+ e.priKey = 2;
+ priIndex.put(txn, e);
+
+ EntityCursor<Integer> cursor = index.keys(txn, null);
+
+ Integer k = cursor.next();
+ assertNotNull(k);
+ assertEquals(1, k.intValue());
+ k = cursor.current();
+ assertNotNull(k);
+ assertEquals(1, k.intValue());
+ assertTrue(cursor.delete());
+ assertTrue(!cursor.delete());
+ assertNull(cursor.current());
+
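+ /* Both records share secKey=1, so the secondary keys cursor returns key 1 again, while the primary cursor moves on to key 2. */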
+ int expectKey = (index == priIndex) ? 2 : 1;
+ k = cursor.next();
+ assertNotNull(k);
+ assertEquals(expectKey, k.intValue());
+ k = cursor.current();
+ assertNotNull(k);
+ assertEquals(expectKey, k.intValue());
+ assertTrue(cursor.delete());
+ assertTrue(!cursor.delete());
+ assertNull(cursor.current());
+
+ k = cursor.next();
+ assertNull(k);
+
+ cursor.close();
+ }
+
+ txnCommit(txn);
+ close();
+ }
+
+ public void testDeleteFromSubIndex()
+ throws DatabaseException {
+
+ open();
+
+ PrimaryIndex<Integer,MyEntity> priIndex =
+ store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+ SecondaryIndex<Integer,Integer,MyEntity> secIndex =
+ store.getSecondaryIndex(priIndex, Integer.class, "secKey");
+
+ Transaction txn = txnBegin();
+ MyEntity e = new MyEntity();
+ e.secKey = 1;
+ e.priKey = 1;
+ priIndex.put(txn, e);
+ e.priKey = 2;
+ priIndex.put(txn, e);
+ e.priKey = 3;
+ priIndex.put(txn, e);
+ e.priKey = 4;
+ priIndex.put(txn, e);
+ txnCommit(txn);
+
+ EntityIndex<Integer,MyEntity> subIndex = secIndex.subIndex(1);
+ txn = txnBeginCursor();
+ e = subIndex.get(txn, 1, null);
+ assertEquals(1, e.priKey);
+ assertEquals(Integer.valueOf(1), e.secKey);
+ e = subIndex.get(txn, 2, null);
+ assertEquals(2, e.priKey);
+ assertEquals(Integer.valueOf(1), e.secKey);
+ e = subIndex.get(txn, 3, null);
+ assertEquals(3, e.priKey);
+ assertEquals(Integer.valueOf(1), e.secKey);
+ e = subIndex.get(txn, 5, null);
+ assertNull(e);
+
+ boolean deleted = subIndex.delete(txn, 1);
+ assertTrue(deleted);
+ assertNull(subIndex.get(txn, 1, null));
+ assertNotNull(subIndex.get(txn, 2, null));
+
+ EntityCursor<MyEntity> cursor = subIndex.entities(txn, null);
+ boolean saw4 = false;
+ for (MyEntity e2 = cursor.first(); e2 != null; e2 = cursor.next()) {
+ if (e2.priKey == 3) {
+ cursor.delete();
+ }
+ if (e2.priKey == 4) {
+ saw4 = true;
+ }
+ }
+ cursor.close();
+ assertTrue(saw4);
+ assertNull(subIndex.get(txn, 1, null));
+ assertNull(subIndex.get(txn, 3, null));
+ assertNotNull(subIndex.get(txn, 2, null));
+ assertNotNull(subIndex.get(txn, 4, null));
+
+ txnCommit(txn);
+ close();
+ }
+
+ @Entity
+ static class MyEntity {
+
+ @PrimaryKey
+ private int priKey;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private Integer secKey;
+
+ private MyEntity() {}
+ }
+
+ public void testSharedSequence()
+ throws DatabaseException {
+
+ open();
+
+ PrimaryIndex<Integer,SharedSequenceEntity1> priIndex1 =
+ store.getPrimaryIndex(Integer.class, SharedSequenceEntity1.class);
+
+ PrimaryIndex<Integer,SharedSequenceEntity2> priIndex2 =
+ store.getPrimaryIndex(Integer.class, SharedSequenceEntity2.class);
+
+ Transaction txn = txnBegin();
+ SharedSequenceEntity1 e1 = new SharedSequenceEntity1();
+ SharedSequenceEntity2 e2 = new SharedSequenceEntity2();
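+ /* Both entity classes use the sequence named "shared", so assigned keys interleave: 1, 2, 3, 4. */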
+ priIndex1.put(txn, e1);
+ assertEquals(1, e1.key);
+ priIndex2.putNoOverwrite(txn, e2);
+ assertEquals(Integer.valueOf(2), e2.key);
+ e1.key = 0;
+ priIndex1.putNoOverwrite(txn, e1);
+ assertEquals(3, e1.key);
+ e2.key = null;
+ priIndex2.put(txn, e2);
+ assertEquals(Integer.valueOf(4), e2.key);
+ txnCommit(txn);
+
+ close();
+ }
+
+ @Entity
+ static class SharedSequenceEntity1 {
+
+ @PrimaryKey(sequence="shared")
+ private int key;
+ }
+
+ @Entity
+ static class SharedSequenceEntity2 {
+
+ @PrimaryKey(sequence="shared")
+ private Integer key;
+ }
+
+ public void testSeparateSequence()
+ throws DatabaseException {
+
+ open();
+
+ PrimaryIndex<Integer,SeparateSequenceEntity1> priIndex1 =
+ store.getPrimaryIndex
+ (Integer.class, SeparateSequenceEntity1.class);
+
+ PrimaryIndex<Integer,SeparateSequenceEntity2> priIndex2 =
+ store.getPrimaryIndex
+ (Integer.class, SeparateSequenceEntity2.class);
+
+ Transaction txn = txnBegin();
+ SeparateSequenceEntity1 e1 = new SeparateSequenceEntity1();
+ SeparateSequenceEntity2 e2 = new SeparateSequenceEntity2();
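+ /* Each entity class uses its own sequence ("seq1", "seq2"), so both start at 1 and advance independently. */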
+ priIndex1.put(txn, e1);
+ assertEquals(1, e1.key);
+ priIndex2.putNoOverwrite(txn, e2);
+ assertEquals(Integer.valueOf(1), e2.key);
+ e1.key = 0;
+ priIndex1.putNoOverwrite(txn, e1);
+ assertEquals(2, e1.key);
+ e2.key = null;
+ priIndex2.put(txn, e2);
+ assertEquals(Integer.valueOf(2), e2.key);
+ txnCommit(txn);
+
+ close();
+ }
+
+ @Entity
+ static class SeparateSequenceEntity1 {
+
+ @PrimaryKey(sequence="seq1")
+ private int key;
+ }
+
+ @Entity
+ static class SeparateSequenceEntity2 {
+
+ @PrimaryKey(sequence="seq2")
+ private Integer key;
+ }
+
+ public void testCompositeSequence()
+ throws DatabaseException {
+
+ open();
+
+ PrimaryIndex<CompositeSequenceEntity1.Key,CompositeSequenceEntity1>
+ priIndex1 =
+ store.getPrimaryIndex
+ (CompositeSequenceEntity1.Key.class,
+ CompositeSequenceEntity1.class);
+
+ PrimaryIndex<CompositeSequenceEntity2.Key,CompositeSequenceEntity2>
+ priIndex2 =
+ store.getPrimaryIndex
+ (CompositeSequenceEntity2.Key.class,
+ CompositeSequenceEntity2.class);
+
+ Transaction txn = txnBegin();
+ CompositeSequenceEntity1 e1 = new CompositeSequenceEntity1();
+ CompositeSequenceEntity2 e2 = new CompositeSequenceEntity2();
+ priIndex1.put(txn, e1);
+ assertEquals(1, e1.key.key);
+ priIndex2.putNoOverwrite(txn, e2);
+ assertEquals(Integer.valueOf(1), e2.key.key);
+ e1.key = null;
+ priIndex1.putNoOverwrite(txn, e1);
+ assertEquals(2, e1.key.key);
+ e2.key = null;
+ priIndex2.put(txn, e2);
+ assertEquals(Integer.valueOf(2), e2.key.key);
+ txnCommit(txn);
+
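+ /* Key.compareTo reverses the natural order, so the cursor returns key 2 before key 1. */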
+ EntityCursor<CompositeSequenceEntity1> c1 = priIndex1.entities();
+ e1 = c1.next();
+ assertEquals(2, e1.key.key);
+ e1 = c1.next();
+ assertEquals(1, e1.key.key);
+ e1 = c1.next();
+ assertNull(e1);
+ c1.close();
+
+ EntityCursor<CompositeSequenceEntity2> c2 = priIndex2.entities();
+ e2 = c2.next();
+ assertEquals(Integer.valueOf(2), e2.key.key);
+ e2 = c2.next();
+ assertEquals(Integer.valueOf(1), e2.key.key);
+ e2 = c2.next();
+ assertNull(e2);
+ c2.close();
+
+ close();
+ }
+
+ @Entity
+ static class CompositeSequenceEntity1 {
+
+ @Persistent
+ static class Key implements Comparable<Key> {
+
+ @KeyField(1)
+ private int key;
+
+ public int compareTo(Key o) {
+ /* Reverse the natural order. */
+ return o.key - key;
+ }
+ }
+
+ @PrimaryKey(sequence="seq1")
+ private Key key;
+ }
+
+ /**
+ * Same as CompositeSequenceEntity1 but using Integer rather than int for
+ * the key type.
+ */
+ @Entity
+ static class CompositeSequenceEntity2 {
+
+ @Persistent
+ static class Key implements Comparable<Key> {
+
+ @KeyField(1)
+ private Integer key;
+
+ public int compareTo(Key o) {
+ /* Reverse the natural order. */
+ return o.key - key;
+ }
+ }
+
+ @PrimaryKey(sequence="seq2")
+ private Key key;
+ }
+
+ /**
+ * When opening read-only, secondaries are not opened when the primary is
+ * opened, causing a different code path to be used for opening
+ * secondaries. For a RawStore in particular, this caused an unreported
+ * NullPointerException in JE 3.0.12. No SR was created because the use
+ * case is very obscure and was discovered by code inspection.
+ */
+ public void testOpenRawStoreReadOnly()
+ throws DatabaseException {
+
+ open();
+ store.getPrimaryIndex(Integer.class, MyEntity.class);
+ close();
+
+ StoreConfig config = new StoreConfig();
+ config.setReadOnly(true);
+ config.setTransactional(envConfig.getTransactional());
+ RawStore rawStore = new RawStore(env, "test", config);
+
+ String clsName = MyEntity.class.getName();
+ rawStore.getSecondaryIndex(clsName, "secKey");
+
+ rawStore.close();
+ }
+
+ /**
+ * When opening an X_TO_MANY secondary that has a persistent key class, the
+ * key class was not recognized as being persistent if it had never been
+ * referenced before getSecondaryIndex was called. This was a bug in JE
+ * 3.0.12, reported on OTN. [#15103]
+ */
+ public void testToManyKeyClass()
+ throws DatabaseException {
+
+ open();
+
+ PrimaryIndex<Integer,ToManyKeyEntity> priIndex =
+ store.getPrimaryIndex(Integer.class, ToManyKeyEntity.class);
+ SecondaryIndex<ToManyKey,Integer,ToManyKeyEntity> secIndex =
+ store.getSecondaryIndex(priIndex, ToManyKey.class, "key2");
+
+ priIndex.put(new ToManyKeyEntity());
+ secIndex.get(new ToManyKey());
+
+ close();
+ }
+
+ /**
+ * Test a fix for a bug where opening a TO_MANY secondary index would fail
+ * with "IllegalArgumentException: Wrong secondary key class: ..."
+ * when the store was opened read-only. [#15156]
+ */
+ public void testToManyReadOnly()
+ throws DatabaseException {
+
+ open();
+ PrimaryIndex<Integer,ToManyKeyEntity> priIndex =
+ store.getPrimaryIndex(Integer.class, ToManyKeyEntity.class);
+ priIndex.put(new ToManyKeyEntity());
+ close();
+
+ openReadOnly();
+ priIndex = store.getPrimaryIndex(Integer.class, ToManyKeyEntity.class);
+ SecondaryIndex<ToManyKey,Integer,ToManyKeyEntity> secIndex =
+ store.getSecondaryIndex(priIndex, ToManyKey.class, "key2");
+ secIndex.get(new ToManyKey());
+ close();
+ }
+
+ @Persistent
+ static class ToManyKey {
+
+ @KeyField(1)
+ int value = 99;
+ }
+
+ @Entity
+ static class ToManyKeyEntity {
+
+ @PrimaryKey
+ int key = 88;
+
+ @SecondaryKey(relate=ONE_TO_MANY)
+ Set<ToManyKey> key2;
+
+ ToManyKeyEntity() {
+ key2 = new HashSet<ToManyKey>();
+ key2.add(new ToManyKey());
+ }
+ }
+
+
+
+ /**
+ * When Y is opened and X has a key with relatedEntity=Y.class, X should
+ * be opened automatically. If X is not opened, foreign key constraints
+ * will not be enforced. [#15358]
+ */
+ public void testAutoOpenRelatedEntity()
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,RelatedY> priY;
+ PrimaryIndex<Integer,RelatedX> priX;
+
+ /* Opening X should create (and open) Y and enforce constraints. */
+ open();
+ priX = store.getPrimaryIndex(Integer.class, RelatedX.class);
+ PersistTestUtils.assertDbExists
+ (true, env, STORE_NAME, RelatedY.class.getName(), null);
+ if (isTransactional) {
+ /* Constraint enforcement requires transactions. */
+ try {
+ priX.put(new RelatedX());
+ fail();
+ } catch (DatabaseException e) {
+ assertTrue
+ ("" + e.getMessage(), (e.getMessage().indexOf
+ ("foreign key not allowed: it is not present") >= 0) ||
+ (e.getMessage().indexOf("DB_FOREIGN_CONFLICT") >= 0));
+ }
+ }
+ priY = store.getPrimaryIndex(Integer.class, RelatedY.class);
+ priY.put(new RelatedY());
+ priX.put(new RelatedX());
+ close();
+
+ /* Delete should cascade even when X is not opened explicitly. */
+ open();
+ priY = store.getPrimaryIndex(Integer.class, RelatedY.class);
+ assertEquals(1, priY.count());
+ priY.delete(88);
+ assertEquals(0, priY.count());
+ priX = store.getPrimaryIndex(Integer.class, RelatedX.class);
+ assertEquals(0, priX.count()); /* Failed prior to [#15358] fix. */
+ close();
+ }
+
+ @Entity
+ static class RelatedX {
+
+ @PrimaryKey
+ int key = 99;
+
+ @SecondaryKey(relate=ONE_TO_ONE,
+ relatedEntity=RelatedY.class,
+ onRelatedEntityDelete=CASCADE)
+ int key2 = 88;
+
+ RelatedX() {
+ }
+ }
+
+ @Entity
+ static class RelatedY {
+
+ @PrimaryKey
+ int key = 88;
+
+ RelatedY() {
+ }
+ }
+
+ public void testSecondaryBulkLoad1()
+ throws DatabaseException {
+
+ doSecondaryBulkLoad(true);
+ }
+
+ public void testSecondaryBulkLoad2()
+ throws DatabaseException {
+
+ doSecondaryBulkLoad(false);
+ }
+
+ private void doSecondaryBulkLoad(boolean closeAndOpenNormally)
+ throws DatabaseException {
+
+ PrimaryIndex<Integer,RelatedX> priX;
+ PrimaryIndex<Integer,RelatedY> priY;
+ SecondaryIndex<Integer,Integer,RelatedX> secX;
+
+ /* Open priX with SecondaryBulkLoad=true. */
+ StoreConfig config = new StoreConfig();
+ config.setAllowCreate(true);
+ config.setSecondaryBulkLoad(true);
+ open(config);
+
+ /* Getting priX should not create the secondary index. */
+ priX = store.getPrimaryIndex(Integer.class, RelatedX.class);
+ PersistTestUtils.assertDbExists
+ (false, env, STORE_NAME, RelatedX.class.getName(), "key2");
+
+ /* We can put records that violate the secondary key constraint. */
+ priX.put(new RelatedX());
+
+ if (closeAndOpenNormally) {
+ /* Open normally and attempt to populate the secondary. */
+ close();
+ open();
+ if (isTransactional && DbCompat.POPULATE_ENFORCES_CONSTRAINTS) {
+ /* Constraint enforcement requires transactions. */
+ try {
+ /* Before adding the foreign key, constraint is violated. */
+ priX = store.getPrimaryIndex(Integer.class,
+ RelatedX.class);
+ fail();
+ } catch (DatabaseException e) {
+ assertTrue
+ (e.toString(),
+ e.toString().contains("foreign key not allowed"));
+ }
+ }
+ /* Open priX with SecondaryBulkLoad=true. */
+ close();
+ open(config);
+ /* Add the foreign key to avoid the constraint error. */
+ priY = store.getPrimaryIndex(Integer.class, RelatedY.class);
+ priY.put(new RelatedY());
+ /* Open normally and the secondary will be populated. */
+ close();
+ open();
+ priX = store.getPrimaryIndex(Integer.class, RelatedX.class);
+ PersistTestUtils.assertDbExists
+ (true, env, STORE_NAME, RelatedX.class.getName(), "key2");
+ secX = store.getSecondaryIndex(priX, Integer.class, "key2");
+ } else {
+ /* Get secondary index explicitly and it will be populated. */
+ if (isTransactional && DbCompat.POPULATE_ENFORCES_CONSTRAINTS) {
+ /* Constraint enforcement requires transactions. */
+ try {
+ /* Before adding the foreign key, constraint is violated. */
+ secX = store.getSecondaryIndex(priX, Integer.class,
+ "key2");
+ fail();
+ } catch (DatabaseException e) {
+ assertTrue
+ (e.toString(),
+ e.toString().contains("foreign key not allowed"));
+ }
+ }
+ /* Add the foreign key. */
+ priY = store.getPrimaryIndex(Integer.class, RelatedY.class);
+ priY.put(new RelatedY());
+ secX = store.getSecondaryIndex(priX, Integer.class, "key2");
+ PersistTestUtils.assertDbExists
+ (true, env, STORE_NAME, RelatedX.class.getName(), "key2");
+ }
+
+ RelatedX x = secX.get(88);
+ assertNotNull(x);
+ close();
+ }
+
+ public void testPersistentFields()
+ throws DatabaseException {
+
+ open();
+ PrimaryIndex<Integer, PersistentFields> pri =
+ store.getPrimaryIndex(Integer.class, PersistentFields.class);
+ PersistentFields o1 = new PersistentFields(-1, 1, 2, 3, 4, 5, 6);
+ assertNull(pri.put(o1));
+ PersistentFields o2 = pri.get(-1);
+ assertNotNull(o2);
+ assertEquals(0, o2.transient1);
+ assertEquals(0, o2.transient2);
+ assertEquals(0, o2.transient3);
+ assertEquals(4, o2.persistent1);
+ assertEquals(5, o2.persistent2);
+ assertEquals(6, o2.persistent3);
+ close();
+ }
+
+ @Entity
+ static class PersistentFields {
+
+ @PrimaryKey int key;
+
+ transient int transient1;
+ @NotPersistent int transient2;
+ @NotPersistent transient int transient3;
+
+ int persistent1;
+ @NotTransient int persistent2;
+ @NotTransient transient int persistent3;
+
+ PersistentFields(int k,
+ int t1,
+ int t2,
+ int t3,
+ int p1,
+ int p2,
+ int p3) {
+ key = k;
+ transient1 = t1;
+ transient2 = t2;
+ transient3 = t3;
+ persistent1 = p1;
+ persistent2 = p2;
+ persistent3 = p3;
+ }
+
+ private PersistentFields() {}
+ }
+
+ /**
+ * When a primary or secondary has a persistent key class, the key class
+ * was not recognized as being persistent when getPrimaryConfig,
+ * getSecondaryConfig, or getSubclassIndex was called, if that key class
+ * was not previously referenced. All three cases are tested by calling
+ * getSecondaryConfig. This was a bug in JE 3.3.69, reported on OTN.
+ * [#16407]
+ */
+ public void testKeyClassInitialization()
+ throws DatabaseException {
+
+ open();
+ store.getSecondaryConfig(ToManyKeyEntity.class, "key2");
+ close();
+ }
+
+ public void testKeyName()
+ throws DatabaseException {
+
+ open();
+
+ PrimaryIndex<Long, BookEntity> pri1 =
+ store.getPrimaryIndex(Long.class, BookEntity.class);
+ PrimaryIndex<Long, AuthorEntity> pri2 =
+ store.getPrimaryIndex(Long.class, AuthorEntity.class);
+
+ BookEntity book = new BookEntity();
+ pri1.put(book);
+ AuthorEntity author = new AuthorEntity();
+ author.bookIds.add(book.bookId);
+ pri2.put(author);
+
+ close();
+
+ open();
+ pri1 = store.getPrimaryIndex(Long.class, BookEntity.class);
+ pri2 = store.getPrimaryIndex(Long.class, AuthorEntity.class);
+ book = pri1.get(1L);
+ assertNotNull(book);
+ author = pri2.get(1L);
+ assertNotNull(author);
+ close();
+ }
+
+ @Entity
+ static class AuthorEntity {
+
+ @PrimaryKey(sequence="authorSeq")
+ long authorId;
+
+ @SecondaryKey(relate=MANY_TO_MANY, relatedEntity=BookEntity.class,
+ name="bookId", onRelatedEntityDelete=NULLIFY)
+ Set<Long> bookIds = new HashSet<Long>();
+ }
+
+ @Entity
+ static class BookEntity {
+
+ @PrimaryKey(sequence="bookSeq")
+ long bookId;
+ }
+
+ /**
+ * Checks that we get an appropriate exception when storing an entity
+ * subclass instance, which contains a secondary key, without registering
+ * the subclass up front. [#16399]
+ */
+ public void testPutEntitySubclassWithoutRegisterClass()
+ throws DatabaseException {
+
+ open();
+
+ final PrimaryIndex<Long, Statement> pri =
+ store.getPrimaryIndex(Long.class, Statement.class);
+
+ final Transaction txn = txnBegin();
+ pri.put(txn, new Statement(1));
+ try {
+ pri.put(txn, new ExtendedStatement(2, null));
+ fail();
+ } catch (IllegalArgumentException expected) {
+ assertTrue(expected.toString(), expected.getMessage().contains
+ ("Entity subclasses defining a secondary key must be " +
+ "registered by calling EntityModel.registerClass or " +
+ "EntityStore.getSubclassIndex before storing an instance " +
+ "of the subclass: " + ExtendedStatement.class.getName()));
+ }
+ txnAbort(txn);
+
+ close();
+ }
+
+ /**
+ * Checks that registerClass avoids an exception when storing an entity
+ * subclass instance, which defines a secondary key. [#16399]
+ */
+ public void testPutEntitySubclassWithRegisterClass()
+ throws DatabaseException {
+
+ open(ExtendedStatement.class);
+
+ final PrimaryIndex<Long, Statement> pri =
+ store.getPrimaryIndex(Long.class, Statement.class);
+
+ final Transaction txn = txnBegin();
+ pri.put(txn, new Statement(1));
+ pri.put(txn, new ExtendedStatement(2, "abc"));
+ txnCommit(txn);
+
+ final SecondaryIndex<String, Long, ExtendedStatement> sec =
+ store.getSubclassIndex(pri, ExtendedStatement.class,
+ String.class, "name");
+
+ ExtendedStatement o = sec.get("abc");
+ assertNotNull(o);
+ assertEquals(2, o.id);
+
+ close();
+ }
+
+ /**
+ * Same as testPutEntitySubclassWithRegisterClass but store the first
+ * instance of the subclass after closing and reopening the store,
+ * *without* calling registerClass. This ensures that a single call to
+ * registerClass is sufficient and subsequent use of the store does not
+ * require it. [#16399]
+ */
+ public void testPutEntitySubclassWithRegisterClass2()
+ throws DatabaseException {
+
+ open(ExtendedStatement.class);
+
+ PrimaryIndex<Long, Statement> pri =
+ store.getPrimaryIndex(Long.class, Statement.class);
+
+ Transaction txn = txnBegin();
+ pri.put(txn, new Statement(1));
+ txnCommit(txn);
+
+ close();
+ open();
+
+ pri = store.getPrimaryIndex(Long.class, Statement.class);
+
+ txn = txnBegin();
+ pri.put(txn, new ExtendedStatement(2, "abc"));
+ txnCommit(txn);
+
+ final SecondaryIndex<String, Long, ExtendedStatement> sec =
+ store.getSubclassIndex(pri, ExtendedStatement.class,
+ String.class, "name");
+
+ ExtendedStatement o = sec.get("abc");
+ assertNotNull(o);
+ assertEquals(2, o.id);
+
+ close();
+ }
+
+ /**
+ * Checks that getSubclassIndex can be used instead of registerClass to
+ * avoid an exception when storing an entity subclass instance, which
+ * defines a secondary key. [#16399]
+ */
+ public void testPutEntitySubclassWithGetSubclassIndex()
+ throws DatabaseException {
+
+ open();
+
+ final PrimaryIndex<Long, Statement> pri =
+ store.getPrimaryIndex(Long.class, Statement.class);
+
+ final SecondaryIndex<String, Long, ExtendedStatement> sec =
+ store.getSubclassIndex(pri, ExtendedStatement.class,
+ String.class, "name");
+
+ final Transaction txn = txnBegin();
+ pri.put(txn, new Statement(1));
+ pri.put(txn, new ExtendedStatement(2, "abc"));
+ txnCommit(txn);
+
+ ExtendedStatement o = sec.get("abc");
+ assertNotNull(o);
+ assertEquals(2, o.id);
+
+ close();
+ }
+
+ /**
+ * Same as testPutEntitySubclassWithGetSubclassIndex but store the first
+ * instance of the subclass after closing and reopening the store,
+ * *without* calling getSubclassIndex. This ensures that a single call to
+ * getSubclassIndex is sufficient and subsequent use of the store does not
+ * require it. [#16399]
+ */
+ public void testPutEntitySubclassWithGetSubclassIndex2()
+ throws DatabaseException {
+
+ open();
+
+ PrimaryIndex<Long, Statement> pri =
+ store.getPrimaryIndex(Long.class, Statement.class);
+
+ SecondaryIndex<String, Long, ExtendedStatement> sec =
+ store.getSubclassIndex(pri, ExtendedStatement.class,
+ String.class, "name");
+
+ Transaction txn = txnBegin();
+ pri.put(txn, new Statement(1));
+ txnCommit(txn);
+
+ close();
+ open();
+
+ pri = store.getPrimaryIndex(Long.class, Statement.class);
+
+ txn = txnBegin();
+ pri.put(txn, new ExtendedStatement(2, "abc"));
+ txnCommit(txn);
+
+ sec = store.getSubclassIndex(pri, ExtendedStatement.class,
+ String.class, "name");
+
+ ExtendedStatement o = sec.get("abc");
+ assertNotNull(o);
+ assertEquals(2, o.id);
+
+ close();
+ }
+
+ /**
+ * Checks that secondary population occurs only once when an index is
+ * created, not every time it is opened, even when it is empty. This is a
+ * JE-only test because we don't have a portable way to get stats that
+ * indicate whether primary reads were performed. [#16399]
+ */
+
+ @Entity
+ static class Statement {
+
+ @PrimaryKey
+ long id;
+
+ Statement(long id) {
+ this.id = id;
+ }
+
+ private Statement() {}
+ }
+
+ @Persistent
+ static class ExtendedStatement extends Statement {
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ String name;
+
+ ExtendedStatement(long id, String name) {
+ super(id);
+ this.name = name;
+ }
+
+ private ExtendedStatement() {}
+ }
+
+ public void testCustomCompare()
+ throws DatabaseException {
+
+ open();
+
+ PrimaryIndex<ReverseIntKey, CustomCompareEntity>
+ priIndex = store.getPrimaryIndex
+ (ReverseIntKey.class, CustomCompareEntity.class);
+
+ SecondaryIndex<ReverseIntKey, ReverseIntKey, CustomCompareEntity>
+ secIndex1 = store.getSecondaryIndex(priIndex, ReverseIntKey.class,
+ "secKey1");
+
+ SecondaryIndex<ReverseIntKey, ReverseIntKey, CustomCompareEntity>
+ secIndex2 = store.getSecondaryIndex(priIndex, ReverseIntKey.class,
+ "secKey2");
+
+ Transaction txn = txnBegin();
+ for (int i = 1; i <= 5; i += 1) {
+ assertTrue(priIndex.putNoOverwrite(txn,
+ new CustomCompareEntity(i)));
+ }
+ txnCommit(txn);
+
+ txn = txnBeginCursor();
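+ /* ReverseIntKey sorts in descending order, so the primary cursor iterates 5..1. */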
+ EntityCursor<CustomCompareEntity> c = priIndex.entities(txn, null);
+ for (int i = 5; i >= 1; i -= 1) {
+ CustomCompareEntity e = c.next();
+ assertNotNull(e);
+ assertEquals(new ReverseIntKey(i), e.key);
+ }
+ c.close();
+ txnCommit(txn);
+
+ txn = txnBeginCursor();
+ c = secIndex1.entities(txn, null);
+ for (int i = -1; i >= -5; i -= 1) {
+ CustomCompareEntity e = c.next();
+ assertNotNull(e);
+ assertEquals(new ReverseIntKey(-i), e.key);
+ assertEquals(new ReverseIntKey(i), e.secKey1);
+ }
+ c.close();
+ txnCommit(txn);
+
+ txn = txnBeginCursor();
+ c = secIndex2.entities(txn, null);
+ for (int i = -1; i >= -5; i -= 1) {
+ CustomCompareEntity e = c.next();
+ assertNotNull(e);
+ assertEquals(new ReverseIntKey(-i), e.key);
+ assertTrue(e.secKey2.contains(new ReverseIntKey(i)));
+ }
+ c.close();
+ txnCommit(txn);
+
+ close();
+ }
+
+ @Entity
+ static class CustomCompareEntity {
+
+ @PrimaryKey
+ private ReverseIntKey key;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private ReverseIntKey secKey1;
+
+ @SecondaryKey(relate=ONE_TO_MANY)
+ private Set<ReverseIntKey> secKey2 = new HashSet<ReverseIntKey>();
+
+ private CustomCompareEntity() {}
+
+ CustomCompareEntity(int i) {
+ key = new ReverseIntKey(i);
+ secKey1 = new ReverseIntKey(-i);
+ secKey2.add(new ReverseIntKey(-i));
+ }
+ }
+
+ @Persistent
+ static class ReverseIntKey implements Comparable<ReverseIntKey> {
+
+ @KeyField(1)
+ private int key;
+
+ public int compareTo(ReverseIntKey o) {
+ /* Reverse the natural order. */
+ return o.key - key;
+ }
+
+ private ReverseIntKey() {}
+
+ ReverseIntKey(int key) {
+ this.key = key;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ return key == ((ReverseIntKey) o).key;
+ }
+
+ @Override
+ public int hashCode() {
+ return key;
+ }
+
+ @Override
+ public String toString() {
+ return "Key = " + key;
+ }
+ }
+
+ /**
+ * Ensures that custom comparators are persisted and work correctly during
+ * recovery. JE recovery uses comparators, so they are serialized and
+ * stored in the DatabaseImpl. They are deserialized during recovery prior
+ * to opening the EntityStore and its format catalog. But the formats are
+ * needed by the comparator, so they are specially created when needed.
+ *
+ * In particular we need to ensure that enum key fields work correctly,
+ * since their formats are not static (like simple type formats are).
+ * [#17140]
+ *
+ * Note that we don't need to actually cause a recovery in order to test
+ * the deserialization and subsequent use of comparators. The JE
+ * DatabaseConfig.setBtreeComparator method serializes and deserializes the
+ * comparator. The comparator is initialized on its first use, just as if
+ * recovery were run.
+ */
+ public void testStoredComparators()
+ throws DatabaseException {
+
+ open();
+
+ PrimaryIndex<StoredComparatorEntity.Key,
+ StoredComparatorEntity> priIndex =
+ store.getPrimaryIndex(StoredComparatorEntity.Key.class,
+ StoredComparatorEntity.class);
+
+ SecondaryIndex<StoredComparatorEntity.MyEnum,
+ StoredComparatorEntity.Key,
+ StoredComparatorEntity> secIndex =
+ store.getSecondaryIndex
+ (priIndex, StoredComparatorEntity.MyEnum.class, "secKey");
+
+ final StoredComparatorEntity.Key[] priKeys =
+ new StoredComparatorEntity.Key[] {
+ new StoredComparatorEntity.Key
+ (StoredComparatorEntity.MyEnum.A, 1,
+ StoredComparatorEntity.MyEnum.A),
+ new StoredComparatorEntity.Key
+ (StoredComparatorEntity.MyEnum.A, 1,
+ StoredComparatorEntity.MyEnum.B),
+ new StoredComparatorEntity.Key
+ (StoredComparatorEntity.MyEnum.A, 2,
+ StoredComparatorEntity.MyEnum.A),
+ new StoredComparatorEntity.Key
+ (StoredComparatorEntity.MyEnum.A, 2,
+ StoredComparatorEntity.MyEnum.B),
+ new StoredComparatorEntity.Key
+ (StoredComparatorEntity.MyEnum.B, 1,
+ StoredComparatorEntity.MyEnum.A),
+ new StoredComparatorEntity.Key
+ (StoredComparatorEntity.MyEnum.B, 1,
+ StoredComparatorEntity.MyEnum.B),
+ new StoredComparatorEntity.Key
+ (StoredComparatorEntity.MyEnum.C, 0,
+ StoredComparatorEntity.MyEnum.C),
+ };
+
+ final StoredComparatorEntity.MyEnum[] secKeys =
+ new StoredComparatorEntity.MyEnum[] {
+ StoredComparatorEntity.MyEnum.C,
+ StoredComparatorEntity.MyEnum.B,
+ StoredComparatorEntity.MyEnum.A,
+ null,
+ StoredComparatorEntity.MyEnum.A,
+ StoredComparatorEntity.MyEnum.B,
+ StoredComparatorEntity.MyEnum.C,
+ };
+
+ assertEquals(priKeys.length, secKeys.length);
+ final int nEntities = priKeys.length;
+
+ Transaction txn = txnBegin();
+ for (int i = 0; i < nEntities; i += 1) {
+ priIndex.put(txn,
+ new StoredComparatorEntity(priKeys[i], secKeys[i]));
+ }
+ txnCommit(txn);
+
+ txn = txnBeginCursor();
+ EntityCursor<StoredComparatorEntity> entities =
+ priIndex.entities(txn, null);
+ for (int i = nEntities - 1; i >= 0; i -= 1) {
+ StoredComparatorEntity e = entities.next();
+ assertNotNull(e);
+ assertEquals(priKeys[i], e.key);
+ assertEquals(secKeys[i], e.secKey);
+ }
+ assertNull(entities.next());
+ entities.close();
+ txnCommit(txn);
+
+ txn = txnBeginCursor();
+ entities = secIndex.entities(txn, null);
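+ /* A null secondary key is not indexed; the remaining entities come back grouped by secKey in the order A, B, C. */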
+ for (StoredComparatorEntity.MyEnum myEnum :
+ EnumSet.allOf(StoredComparatorEntity.MyEnum.class)) {
+ for (int i = 0; i < nEntities; i += 1) {
+ if (secKeys[i] == myEnum) {
+ StoredComparatorEntity e = entities.next();
+ assertNotNull(e);
+ assertEquals(priKeys[i], e.key);
+ assertEquals(secKeys[i], e.secKey);
+ }
+ }
+ }
+ assertNull(entities.next());
+ entities.close();
+ txnCommit(txn);
+
+ close();
+ }
+
+ @Entity
+ static class StoredComparatorEntity {
+
+ enum MyEnum { A, B, C };
+
+ @Persistent
+ static class Key implements Comparable<Key> {
+
+ @KeyField(1)
+ MyEnum f1;
+
+ @KeyField(2)
+ Integer f2;
+
+ @KeyField(3)
+ MyEnum f3;
+
+ private Key() {}
+
+ Key(MyEnum f1, Integer f2, MyEnum f3) {
+ this.f1 = f1;
+ this.f2 = f2;
+ this.f3 = f3;
+ }
+
+ public int compareTo(Key o) {
+ /* Reverse the natural order. */
+ int i = f1.compareTo(o.f1);
+ if (i != 0) return -i;
+ i = f2.compareTo(o.f2);
+ if (i != 0) return -i;
+ i = f3.compareTo(o.f3);
+ if (i != 0) return -i;
+ return 0;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (!(other instanceof Key)) {
+ return false;
+ }
+ Key o = (Key) other;
+ return f1 == o.f1 &&
+ f2.equals(o.f2) &&
+ f3 == o.f3;
+ }
+
+ @Override
+ public int hashCode() {
+ return f1.ordinal() + f2 + f3.ordinal();
+ }
+
+ @Override
+ public String toString() {
+ return "[Key " + f1 + ' ' + f2 + ' ' + f3 + ']';
+ }
+ }
+
+ @PrimaryKey
+ Key key;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ private MyEnum secKey;
+
+ private StoredComparatorEntity() {}
+
+ StoredComparatorEntity(Key key, MyEnum secKey) {
+ this.key = key;
+ this.secKey = secKey;
+ }
+
+ @Override
+ public String toString() {
+ return "[pri = " + key + " sec = " + secKey + ']';
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/PersistTestUtils.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/PersistTestUtils.java
new file mode 100644
index 0000000..354eb19
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/PersistTestUtils.java
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+package com.sleepycat.persist.test;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.Environment;
+
+class PersistTestUtils {
+
+ /**
+ * Asserts that a database exists or does not exist, per expectExists. If keyName is
+ * null, checks an entity database. If keyName is non-null, checks a
+ * secondary database.
+ */
+ static void assertDbExists(boolean expectExists,
+ Environment env,
+ String storeName,
+ String entityClassName,
+ String keyName) {
+ String fileName;
+ String dbName;
+ if (DbCompat.SEPARATE_DATABASE_FILES) {
+ fileName = storeName + '-' + entityClassName;
+ if (keyName != null) {
+ fileName += "-" + keyName;
+ }
+ dbName = null;
+ } else {
+ fileName = null;
+ dbName = "persist#" + storeName + '#' + entityClassName;
+ if (keyName != null) {
+ dbName += "#" + keyName;
+ }
+ }
+ boolean exists = DbCompat.databaseExists(env, fileName, dbName);
+ if (expectExists != exists) {
+ TestCase.fail
+ ((expectExists ? "Does not exist: " : "Does exist: ") +
+ dbName);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/SequenceTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/SequenceTest.java
new file mode 100644
index 0000000..f228426
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/SequenceTest.java
@@ -0,0 +1,469 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.persist.test;
+
+import java.io.File;
+
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+import com.sleepycat.db.util.DualTestCase;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Mark Hayes
+ */
+public class SequenceTest extends DualTestCase {
+
+ private File envHome;
+ private Environment env;
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ super.setUp();
+
+ envHome = new File(System.getProperty(SharedTestUtils.DEST_DIR));
+ SharedTestUtils.emptyDir(envHome);
+ }
+
+ @Override
+ public void tearDown()
+ throws Exception {
+
+ super.tearDown();
+
+ envHome = null;
+ env = null;
+ }
+
+ public void testSequenceKeys()
+ throws Exception {
+
+ Class[] classes = {
+ SequenceEntity_Long.class,
+ SequenceEntity_Integer.class,
+ SequenceEntity_Short.class,
+ SequenceEntity_Byte.class,
+ SequenceEntity_tlong.class,
+ SequenceEntity_tint.class,
+ SequenceEntity_tshort.class,
+ SequenceEntity_tbyte.class,
+ SequenceEntity_Long_composite.class,
+ SequenceEntity_Integer_composite.class,
+ SequenceEntity_Short_composite.class,
+ SequenceEntity_Byte_composite.class,
+ SequenceEntity_tlong_composite.class,
+ SequenceEntity_tint_composite.class,
+ SequenceEntity_tshort_composite.class,
+ SequenceEntity_tbyte_composite.class,
+ };
+
+ EnvironmentConfig envConfig = TestEnv.TXN.getConfig();
+ envConfig.setAllowCreate(true);
+ env = create(envHome, envConfig);
+
+ StoreConfig storeConfig = new StoreConfig();
+ storeConfig.setAllowCreate(true);
+ storeConfig.setTransactional(true);
+ EntityStore store = new EntityStore(env, "foo", storeConfig);
+
+ long seq = 0;
+
+ for (int i = 0; i < classes.length; i += 1) {
+ Class entityCls = classes[i];
+ SequenceEntity entity = (SequenceEntity) entityCls.newInstance();
+ Class keyCls = entity.getKeyClass();
+
+ PrimaryIndex<Object,SequenceEntity> index =
+ store.getPrimaryIndex(keyCls, entityCls);
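+ /* The first put assigns the next value from the shared sequence; a put with the key already set keeps that key; nullifying the key forces a new assignment. */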
+ index.putNoReturn(entity);
+ seq += 1;
+ assertEquals(seq, entity.getKey());
+
+ index.putNoReturn(entity);
+ assertEquals(seq, entity.getKey());
+
+ entity.nullifyKey();
+ index.putNoReturn(entity);
+ seq += 1;
+ assertEquals(seq, entity.getKey());
+ }
+
+ store.close();
+ close(env);
+ env = null;
+ }
+
+ interface SequenceEntity {
+ Class getKeyClass();
+ long getKey();
+ void nullifyKey();
+ }
+
+ @Entity
+ static class SequenceEntity_Long implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ Long priKey;
+
+ public Class getKeyClass() {
+ return Long.class;
+ }
+
+ public long getKey() {
+ return priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = null;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_Integer implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ Integer priKey;
+
+ public Class getKeyClass() {
+ return Integer.class;
+ }
+
+ public long getKey() {
+ return priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = null;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_Short implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ Short priKey;
+
+ public Class getKeyClass() {
+ return Short.class;
+ }
+
+ public long getKey() {
+ return priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = null;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_Byte implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ Byte priKey;
+
+ public Class getKeyClass() {
+ return Byte.class;
+ }
+
+ public long getKey() {
+ return priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = null;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_tlong implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ long priKey;
+
+ public Class getKeyClass() {
+ return Long.class;
+ }
+
+ public long getKey() {
+ return priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = 0;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_tint implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ int priKey;
+
+ public Class getKeyClass() {
+ return Integer.class;
+ }
+
+ public long getKey() {
+ return priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = 0;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_tshort implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ short priKey;
+
+ public Class getKeyClass() {
+ return Short.class;
+ }
+
+ public long getKey() {
+ return priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = 0;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_tbyte implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ byte priKey;
+
+ public Class getKeyClass() {
+ return Byte.class;
+ }
+
+ public long getKey() {
+ return priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = 0;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_Long_composite implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ Key priKey;
+
+ @Persistent
+ static class Key {
+ @KeyField(1)
+ Long priKey;
+ }
+
+ public Class getKeyClass() {
+ return Key.class;
+ }
+
+ public long getKey() {
+ return priKey.priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = null;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_Integer_composite implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ Key priKey;
+
+ @Persistent
+ static class Key {
+ @KeyField(1)
+ Integer priKey;
+ }
+
+ public Class getKeyClass() {
+ return Key.class;
+ }
+
+ public long getKey() {
+ return priKey.priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = null;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_Short_composite implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ Key priKey;
+
+ @Persistent
+ static class Key {
+ @KeyField(1)
+ Short priKey;
+ }
+
+ public Class getKeyClass() {
+ return Key.class;
+ }
+
+ public long getKey() {
+ return priKey.priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = null;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_Byte_composite implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ Key priKey;
+
+ @Persistent
+ static class Key {
+ @KeyField(1)
+ Byte priKey;
+ }
+
+ public Class getKeyClass() {
+ return Key.class;
+ }
+
+ public long getKey() {
+ return priKey.priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = null;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_tlong_composite implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ Key priKey;
+
+ @Persistent
+ static class Key {
+ @KeyField(1)
+ long priKey;
+ }
+
+ public Class getKeyClass() {
+ return Key.class;
+ }
+
+ public long getKey() {
+ return priKey.priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = null;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_tint_composite implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ Key priKey;
+
+ @Persistent
+ static class Key {
+ @KeyField(1)
+ int priKey;
+ }
+
+ public Class getKeyClass() {
+ return Key.class;
+ }
+
+ public long getKey() {
+ return priKey.priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = null;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_tshort_composite implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ Key priKey;
+
+ @Persistent
+ static class Key {
+ @KeyField(1)
+ short priKey;
+ }
+
+ public Class getKeyClass() {
+ return Key.class;
+ }
+
+ public long getKey() {
+ return priKey.priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = null;
+ }
+ }
+
+ @Entity
+ static class SequenceEntity_tbyte_composite implements SequenceEntity {
+
+ @PrimaryKey(sequence="X")
+ Key priKey;
+
+ @Persistent
+ static class Key {
+ @KeyField(1)
+ byte priKey;
+ }
+
+ public Class getKeyClass() {
+ return Key.class;
+ }
+
+ public long getKey() {
+ return priKey.priKey;
+ }
+
+ public void nullifyKey() {
+ priKey = null;
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/SubclassIndexTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/SubclassIndexTest.java
new file mode 100644
index 0000000..f1f5dd9
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/persist/test/SubclassIndexTest.java
@@ -0,0 +1,251 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+import java.io.File;
+
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+import com.sleepycat.db.Transaction;
+import com.sleepycat.db.util.DualTestCase;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.AnnotationModel;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestEnv;
+
+public class SubclassIndexTest extends DualTestCase {
+
+ private File envHome;
+ private Environment env;
+ private EntityStore store;
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ super.setUp();
+
+ envHome = new File(System.getProperty(SharedTestUtils.DEST_DIR));
+ SharedTestUtils.emptyDir(envHome);
+ }
+
+ @Override
+ public void tearDown()
+ throws Exception {
+
+ super.tearDown();
+
+ envHome = null;
+ env = null;
+ }
+
+ private void open()
+ throws DatabaseException {
+
+ EnvironmentConfig envConfig = TestEnv.TXN.getConfig();
+ envConfig.setAllowCreate(true);
+ env = create(envHome, envConfig);
+
+ EntityModel model = new AnnotationModel();
+ model.registerClass(Manager.class);
+ model.registerClass(SalariedManager.class);
+
+ StoreConfig storeConfig = new StoreConfig();
+ storeConfig.setModel(model);
+ storeConfig.setAllowCreate(true);
+ storeConfig.setTransactional(true);
+ store = new EntityStore(env, "foo", storeConfig);
+ }
+
+ private void close()
+ throws DatabaseException {
+
+ store.close();
+ store = null;
+ close(env);
+ env = null;
+ }
+
+ public void testSubclassIndex()
+ throws DatabaseException {
+
+ open();
+
+ PrimaryIndex<String, Employee> employeesById =
+ store.getPrimaryIndex(String.class, Employee.class);
+
+ employeesById.put(new Employee("1"));
+ employeesById.put(new Manager("2", "a"));
+ employeesById.put(new Manager("3", "a"));
+ employeesById.put(new Manager("4", "b"));
+
+ Employee e;
+ Manager m;
+
+ e = employeesById.get("1");
+ assertNotNull(e);
+ assertTrue(!(e instanceof Manager));
+
+ /* Ensure DB exists BEFORE calling getSubclassIndex. [#15247] */
+ PersistTestUtils.assertDbExists
+ (true, env, "foo", Employee.class.getName(), "dept");
+
+ /* Normal use: Subclass index for a key in the subclass. */
+ SecondaryIndex<String, String, Manager> managersByDept =
+ store.getSubclassIndex
+ (employeesById, Manager.class, String.class, "dept");
+
+ m = managersByDept.get("a");
+ assertNotNull(m);
+ assertEquals("2", m.id);
+
+ m = managersByDept.get("b");
+ assertNotNull(m);
+ assertEquals("4", m.id);
+
+ Transaction txn = env.beginTransaction(null, null);
+ EntityCursor<Manager> managers = managersByDept.entities(txn, null);
+ try {
+ m = managers.next();
+ assertNotNull(m);
+ assertEquals("2", m.id);
+ m = managers.next();
+ assertNotNull(m);
+ assertEquals("3", m.id);
+ m = managers.next();
+ assertNotNull(m);
+ assertEquals("4", m.id);
+ m = managers.next();
+ assertNull(m);
+ } finally {
+ managers.close();
+ txn.commit();
+ }
+
+ /* Getting a subclass index for the entity class is also allowed. */
+ store.getSubclassIndex
+ (employeesById, Employee.class, String.class, "other");
+
+ /* Getting a subclass index for a base class key is not allowed. */
+ try {
+ store.getSubclassIndex
+ (employeesById, Manager.class, String.class, "other");
+ fail();
+ } catch (IllegalArgumentException expected) {
+ }
+
+ close();
+ }
+
+ /**
+ * Previously this tested that a secondary key database was added only
+ * AFTER storing the first instance of the subclass that defines the key.
+ * Now that we require registering the subclass up front, the database is
+ * created up front also. So this test is somewhat less useful, but still
+ * nice to have around. [#16399]
+ */
+ public void testAddSecKey()
+ throws DatabaseException {
+
+ open();
+ PrimaryIndex<String, Employee> employeesById =
+ store.getPrimaryIndex(String.class, Employee.class);
+ employeesById.put(new Employee("1"));
+ assertTrue(hasEntityKey("dept"));
+ close();
+
+ open();
+ employeesById = store.getPrimaryIndex(String.class, Employee.class);
+ assertTrue(hasEntityKey("dept"));
+ employeesById.put(new Manager("2", "a"));
+ assertTrue(hasEntityKey("dept"));
+ close();
+
+ open();
+ assertTrue(hasEntityKey("dept"));
+ close();
+
+ open();
+ employeesById = store.getPrimaryIndex(String.class, Employee.class);
+ assertTrue(hasEntityKey("salary"));
+ employeesById.put(new SalariedManager("3", "a", "111"));
+ assertTrue(hasEntityKey("salary"));
+ close();
+
+ open();
+ assertTrue(hasEntityKey("dept"));
+ assertTrue(hasEntityKey("salary"));
+ close();
+ }
+
+ private boolean hasEntityKey(String keyName) {
+ return store.getModel().
+ getRawType(Employee.class.getName()).
+ getEntityMetadata().
+ getSecondaryKeys().
+ keySet().
+ contains(keyName);
+ }
+
+ @Entity
+ private static class Employee {
+
+ @PrimaryKey
+ String id;
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ String other;
+
+ Employee(String id) {
+ this.id = id;
+ }
+
+ private Employee() {}
+ }
+
+ @Persistent
+ private static class Manager extends Employee {
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ String dept;
+
+ Manager(String id, String dept) {
+ super(id);
+ this.dept = dept;
+ }
+
+ private Manager() {}
+ }
+
+ @Persistent
+ private static class SalariedManager extends Manager {
+
+ @SecondaryKey(relate=MANY_TO_ONE)
+ String salary;
+
+ SalariedManager(String id, String dept, String salary) {
+ super(id, dept);
+ this.salary = salary;
+ }
+
+ private SalariedManager() {}
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/util/test/ExceptionWrapperTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/util/test/ExceptionWrapperTest.java
new file mode 100644
index 0000000..0e09e15
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/util/test/ExceptionWrapperTest.java
@@ -0,0 +1,134 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.util.test;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.util.ExceptionUnwrapper;
+import com.sleepycat.util.IOExceptionWrapper;
+import com.sleepycat.util.RuntimeExceptionWrapper;
+
+/**
+ * @author Mark Hayes
+ */
+public class ExceptionWrapperTest extends TestCase {
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite(ExceptionWrapperTest.class);
+ return suite;
+ }
+
+ public ExceptionWrapperTest(String name) {
+
+ super(name);
+ }
+
+ @Override
+ public void setUp() {
+
+ SharedTestUtils.printTestName("ExceptionWrapperTest." + getName());
+ }
+
+ public void testIOWrapper() {
+ try {
+ throw new IOExceptionWrapper(new RuntimeException("msg"));
+ } catch (IOException e) {
+ Exception ee = ExceptionUnwrapper.unwrap(e);
+ assertTrue(ee instanceof RuntimeException);
+ assertEquals("msg", ee.getMessage());
+
+ Throwable t = ExceptionUnwrapper.unwrapAny(e);
+ assertTrue(t instanceof RuntimeException);
+ assertEquals("msg", t.getMessage());
+ }
+ }
+
+ public void testRuntimeWrapper() {
+ try {
+ throw new RuntimeExceptionWrapper(new IOException("msg"));
+ } catch (RuntimeException e) {
+ Exception ee = ExceptionUnwrapper.unwrap(e);
+ assertTrue(ee instanceof IOException);
+ assertEquals("msg", ee.getMessage());
+
+ Throwable t = ExceptionUnwrapper.unwrapAny(e);
+ assertTrue(t instanceof IOException);
+ assertEquals("msg", t.getMessage());
+ }
+ }
+
+ public void testErrorWrapper() {
+ try {
+ throw new RuntimeExceptionWrapper(new Error("msg"));
+ } catch (RuntimeException e) {
+ try {
+ ExceptionUnwrapper.unwrap(e);
+ fail();
+ } catch (Error ee) {
+ assertTrue(ee instanceof Error);
+ assertEquals("msg", ee.getMessage());
+ }
+
+ Throwable t = ExceptionUnwrapper.unwrapAny(e);
+ assertTrue(t instanceof Error);
+ assertEquals("msg", t.getMessage());
+ }
+ }
+
+ /**
+ * Generates a stack trace for a nested exception and checks the output
+ * for the nested exception.
+ */
+ public void testStackTrace() {
+
+ /* Nested stack traces are not available in Java 1.3. */
+ String version = System.getProperty("java.version");
+ if (version.startsWith("1.3.")) {
+ return;
+ }
+
+ Exception ex = new Exception("some exception");
+ String causedBy = "Caused by: java.lang.Exception: some exception";
+
+ try {
+ throw new RuntimeExceptionWrapper(ex);
+ } catch (RuntimeException e) {
+ StringWriter sw = new StringWriter();
+ e.printStackTrace(new PrintWriter(sw));
+ String s = sw.toString();
+ assertTrue(s.indexOf(causedBy) != -1);
+ }
+
+ try {
+ throw new IOExceptionWrapper(ex);
+ } catch (IOException e) {
+ StringWriter sw = new StringWriter();
+ e.printStackTrace(new PrintWriter(sw));
+ String s = sw.toString();
+ assertTrue(s.indexOf(causedBy) != -1);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/util/test/FastOutputStreamTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/util/test/FastOutputStreamTest.java
new file mode 100644
index 0000000..a17ea9b
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/util/test/FastOutputStreamTest.java
@@ -0,0 +1,66 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.util.test;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.util.FastOutputStream;
+
+/**
+ * @author Mark Hayes
+ */
+public class FastOutputStreamTest extends TestCase {
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite(FastOutputStreamTest.class);
+ return suite;
+ }
+
+ public FastOutputStreamTest(String name) {
+
+ super(name);
+ }
+
+ @Override
+ public void setUp() {
+
+ SharedTestUtils.printTestName("FastOutputStreamTest." + getName());
+ }
+
+ public void testBufferSizing() {
+ FastOutputStream fos = new FastOutputStream();
+ assertEquals
+ (FastOutputStream.DEFAULT_INIT_SIZE, fos.getBufferBytes().length);
+
+ /* Write X+1 bytes, expect array size 2X+1 */
+ fos.write(new byte[FastOutputStream.DEFAULT_INIT_SIZE + 1]);
+ assertEquals
+ ((FastOutputStream.DEFAULT_INIT_SIZE * 2) + 1,
+ fos.getBufferBytes().length);
+
+ /* Write X+1 bytes, expect array size 4X+3 = (2(2X+1) + 1) */
+ fos.write(new byte[FastOutputStream.DEFAULT_INIT_SIZE + 1]);
+ assertEquals
+ ((FastOutputStream.DEFAULT_INIT_SIZE * 4) + 3,
+ fos.getBufferBytes().length);
+ }
+}
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/util/test/PackedIntegerTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/util/test/PackedIntegerTest.java
new file mode 100644
index 0000000..46e2a4e
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/util/test/PackedIntegerTest.java
@@ -0,0 +1,191 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.util.test;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+
+import com.sleepycat.util.PackedInteger;
+
+public class PackedIntegerTest extends TestCase
+{
+ static final long V119 = 119L;
+ static final long MAX_1 = 0xFFL;
+ static final long MAX_2 = 0xFFFFL;
+ static final long MAX_3 = 0xFFFFFFL;
+ static final long MAX_4 = 0xFFFFFFFFL;
+ static final long MAX_5 = 0xFFFFFFFFFFL;
+ static final long MAX_6 = 0xFFFFFFFFFFFFL;
+ static final long MAX_7 = 0xFFFFFFFFFFFFFFL;
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+
+ return new PackedIntegerTest();
+ }
+
+ public PackedIntegerTest() {
+
+ super("PackedIntegerTest");
+ }
+
+ @Override
+ public void runTest() {
+
+ /* Packed int tests. */
+
+ testIntRange(-V119, V119, 1);
+
+ testIntRange(-MAX_1 - V119, -1 - V119, 2);
+ testIntRange(1 + V119, MAX_1 + V119, 2);
+
+ testIntRange(-MAX_2 - V119, -MAX_2 + 99, 3);
+ testIntRange(-MAX_1 - V119 - 99, -MAX_1 - V119 - 1, 3);
+ testIntRange(MAX_1 + V119 + 1, MAX_1 + V119 + 99, 3);
+ testIntRange(MAX_2 - 99, MAX_2 + V119, 3);
+
+ testIntRange(-MAX_3 - V119, -MAX_3 + 99, 4);
+ testIntRange(-MAX_2 - V119 - 99, -MAX_2 - V119 - 1, 4);
+ testIntRange(MAX_2 + V119 + 1, MAX_2 + V119 + 99, 4);
+ testIntRange(MAX_3 - 99, MAX_3 + V119, 4);
+
+ testIntRange(Integer.MIN_VALUE, Integer.MIN_VALUE + 99, 5);
+ testIntRange(Integer.MAX_VALUE - 99, Integer.MAX_VALUE, 5);
+
+ /* Packed long tests. */
+
+ testLongRange(-V119, V119, 1);
+
+ testLongRange(-MAX_1 - V119, -1 - V119, 2);
+ testLongRange(1 + V119, MAX_1 + V119, 2);
+
+ testLongRange(-MAX_2 - V119, -MAX_2 + 99, 3);
+ testLongRange(-MAX_1 - V119 - 99, -MAX_1 - V119 - 1, 3);
+ testLongRange(MAX_1 + V119 + 1, MAX_1 + V119 + 99, 3);
+ testLongRange(MAX_2 - 99, MAX_2 + V119, 3);
+
+ testLongRange(-MAX_3 - V119, -MAX_3 + 99, 4);
+ testLongRange(-MAX_2 - V119 - 99, -MAX_2 - V119 - 1, 4);
+ testLongRange(MAX_2 + V119 + 1, MAX_2 + V119 + 99, 4);
+ testLongRange(MAX_3 - 99, MAX_3 + V119, 4);
+
+ testLongRange(-MAX_4 - V119, -MAX_4 + 99, 5);
+ testLongRange(-MAX_3 - V119 - 99, -MAX_3 - V119 - 1, 5);
+ testLongRange(MAX_3 + V119 + 1, MAX_3 + V119 + 99, 5);
+ testLongRange(MAX_4 - 99, MAX_4 + V119, 5);
+
+ testLongRange(-MAX_5 - V119, -MAX_5 + 99, 6);
+ testLongRange(-MAX_4 - V119 - 99, -MAX_4 - V119 - 1, 6);
+ testLongRange(MAX_4 + V119 + 1, MAX_4 + V119 + 99, 6);
+ testLongRange(MAX_5 - 99, MAX_5 + V119, 6);
+
+ testLongRange(-MAX_6 - V119, -MAX_6 + 99, 7);
+ testLongRange(-MAX_5 - V119 - 99, -MAX_5 - V119 - 1, 7);
+ testLongRange(MAX_5 + V119 + 1, MAX_5 + V119 + 99, 7);
+ testLongRange(MAX_6 - 99, MAX_6 + V119, 7);
+
+ testLongRange(-MAX_7 - V119, -MAX_7 + 99, 8);
+ testLongRange(-MAX_6 - V119 - 99, -MAX_6 - V119 - 1, 8);
+ testLongRange(MAX_6 + V119 + 1, MAX_6 + V119 + 99, 8);
+ testLongRange(MAX_7 - 99, MAX_7 + V119, 8);
+
+ testLongRange(Long.MIN_VALUE, Long.MIN_VALUE + 99, 9);
+ testLongRange(Long.MAX_VALUE - 99, Long.MAX_VALUE - 1, 9);
+ }
+
+ private void testIntRange(long firstValue,
+ long lastValue,
+ int bytesExpected) {
+
+ byte[] buf = new byte[1000];
+ int off = 0;
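+ /* First pass writes each value and checks the encoded length; the second pass below reads the values back. */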
+
+ for (long longI = firstValue; longI <= lastValue; longI += 1) {
+ int i = (int) longI;
+ int before = off;
+ off = PackedInteger.writeInt(buf, off, i);
+ int bytes = off - before;
+ if (bytes != bytesExpected) {
+ fail("output of value=" + i + " bytes=" + bytes +
+ " bytesExpected=" + bytesExpected);
+ }
+ bytes = PackedInteger.getWriteIntLength(i);
+ if (bytes != bytesExpected) {
+ fail("count of value=" + i + " bytes=" + bytes +
+ " bytesExpected=" + bytesExpected);
+ }
+ }
+
+ off = 0;
+
+ for (long longI = firstValue; longI <= lastValue; longI += 1) {
+ int i = (int) longI;
+ int bytes = PackedInteger.getReadIntLength(buf, off);
+ if (bytes != bytesExpected) {
+ fail("count of value=" + i + " bytes=" + bytes +
+ " bytesExpected=" + bytesExpected);
+ }
+ int value = PackedInteger.readInt(buf, off);
+ if (value != i) {
+ fail("input of value=" + i + " but got=" + value);
+ }
+ off += bytes;
+ }
+ }
+
+ private void testLongRange(long firstValue,
+ long lastValue,
+ int bytesExpected) {
+
+ byte[] buf = new byte[2000];
+ int off = 0;
+
+ for (long longI = firstValue; longI <= lastValue; longI += 1) {
+ long i = longI;
+ int before = off;
+ off = PackedInteger.writeLong(buf, off, i);
+ int bytes = off - before;
+ if (bytes != bytesExpected) {
+ fail("output of value=" + i + " bytes=" + bytes +
+ " bytesExpected=" + bytesExpected);
+ }
+ bytes = PackedInteger.getWriteLongLength(i);
+ if (bytes != bytesExpected) {
+ fail("count of value=" + i + " bytes=" + bytes +
+ " bytesExpected=" + bytesExpected);
+ }
+ }
+
+ off = 0;
+
+ for (long longI = firstValue; longI <= lastValue; longI += 1) {
+ long i = longI;
+ int bytes = PackedInteger.getReadLongLength(buf, off);
+ if (bytes != bytesExpected) {
+ fail("count of value=" + i + " bytes=" + bytes +
+ " bytesExpected=" + bytesExpected);
+ }
+ long value = PackedInteger.readLong(buf, off);
+ if (value != i) {
+ fail("input of value=" + i + " but got=" + value);
+ }
+ off += bytes;
+ }
+ }
+}
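
The test above exercises the PackedInteger encoding at each byte-length boundary. As a quick orientation, here is a hedged round-trip sketch that uses only the calls exercised by the test (writeInt, readInt and the two length helpers); the class name and value are illustrative only.

import com.sleepycat.util.PackedInteger;

public class PackedIntegerRoundTrip {
    public static void main(String[] args) {
        /* Values in [-119, 119] pack into a single byte; larger magnitudes grow
           by one byte per range, which is what testIntRange() asserts above. */
        int value = 1234;
        byte[] buf = new byte[PackedInteger.getWriteIntLength(value)];
        int end = PackedInteger.writeInt(buf, 0, value);    // returns the offset past the value
        assert end == PackedInteger.getReadIntLength(buf, 0);
        assert PackedInteger.readInt(buf, 0) == value;      // the value round-trips
    }
}
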
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/util/test/SharedTestUtils.java b/db-4.8.30/test/scr024/src/com/sleepycat/util/test/SharedTestUtils.java
new file mode 100644
index 0000000..109dd0c
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/util/test/SharedTestUtils.java
@@ -0,0 +1,178 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.util.test;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.db.DatabaseConfig;
+
+/**
+ * Test utility methods shared by JE and DB core tests. The collections and
+ * persist package tests are used in both JE and DB core.
+ */
+public class SharedTestUtils {
+
+ /* Common system properties for running tests */
+ public static String DEST_DIR = "testdestdir";
+ public static String NO_SYNC = "txnnosync";
+ public static String LONG_TEST = "longtest";
+
+ public static final DatabaseConfig DBCONFIG_CREATE = new DatabaseConfig();
+ static {
+ DBCONFIG_CREATE.setAllowCreate(true);
+ }
+
+ private static File getTestDir() {
+ String dir = System.getProperty(DEST_DIR);
+ if (dir == null || dir.length() == 0) {
+ throw new IllegalArgumentException
+ ("System property must be set to test data directory: " +
+ DEST_DIR);
+ }
+ return new File(dir);
+ }
+
+ /**
+ * @return true if long running tests are enabled via setting the system
+ * property longtest=true.
+ */
+ public static boolean runLongTests() {
+ String longTestProp = System.getProperty(LONG_TEST);
+ if ((longTestProp != null) &&
+ longTestProp.equalsIgnoreCase("true")) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ public static void printTestName(String name) {
+ // don't want verbose printing for now
+ // System.out.println(name);
+ }
+
+ public static File getExistingDir(String name) {
+ File dir = new File(getTestDir(), name);
+ if (!dir.exists() || !dir.isDirectory()) {
+ throw new IllegalStateException(
+ "Not an existing directory: " + dir);
+ }
+ return dir;
+ }
+
+ public static File getNewDir() {
+ return getNewDir("test-dir");
+ }
+
+ public static void emptyDir(File dir) {
+ if (dir.isDirectory()) {
+ String[] files = dir.list();
+ if (files != null) {
+ for (int i = 0; i < files.length; i += 1) {
+ new File(dir, files[i]).delete();
+ }
+ }
+ } else {
+ dir.delete();
+ dir.mkdirs();
+ }
+ }
+
+ public static File getNewDir(String name) {
+ File dir = new File(getTestDir(), name);
+ emptyDir(dir);
+ return dir;
+ }
+
+ public static File getNewFile() {
+ return getNewFile("test-file");
+ }
+
+ public static File getNewFile(String name) {
+ return getNewFile(getTestDir(), name);
+ }
+
+ public static File getNewFile(File dir, String name) {
+ File file = new File(dir, name);
+ file.delete();
+ return file;
+ }
+
+ public static boolean copyResource(Class cls, String fileName, File toDir)
+ throws IOException {
+
+ InputStream in = cls.getResourceAsStream("testdata/" + fileName);
+ if (in == null) {
+ return false;
+ }
+ in = new BufferedInputStream(in);
+ File file = new File(toDir, fileName);
+ OutputStream out = new FileOutputStream(file);
+ out = new BufferedOutputStream(out);
+ int c;
+ while ((c = in.read()) >= 0) out.write(c);
+ in.close();
+ out.close();
+ return true;
+ }
+
+ public static String qualifiedTestName(TestCase test) {
+
+ String s = test.getClass().getName();
+ int i = s.lastIndexOf('.');
+ if (i >= 0) {
+ s = s.substring(i + 1);
+ }
+ return s + '.' + test.getName();
+ }
+
+ /**
+ * Copies all files in fromDir to toDir. Does not copy subdirectories.
+ */
+ public static void copyFiles(File fromDir, File toDir)
+ throws IOException {
+
+ String[] names = fromDir.list();
+ if (names != null) {
+ for (int i = 0; i < names.length; i += 1) {
+ File fromFile = new File(fromDir, names[i]);
+ if (fromFile.isDirectory()) {
+ continue;
+ }
+ File toFile = new File(toDir, names[i]);
+ int len = (int) fromFile.length();
+ byte[] data = new byte[len];
+ FileInputStream fis = null;
+ FileOutputStream fos = null;
+ try {
+ fis = new FileInputStream(fromFile);
+ fos = new FileOutputStream(toFile);
+ fis.read(data);
+ fos.write(data);
+ } finally {
+ if (fis != null) {
+ fis.close();
+ }
+ if (fos != null) {
+ fos.close();
+ }
+ }
+ }
+ }
+ }
+}
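
A hedged usage sketch for the class above: getTestDir() requires the testdestdir system property, so a test typically inherits it from the command line and then asks for a fresh scratch directory. The property value and directory name below are illustrative only.

import java.io.File;

import com.sleepycat.util.test.SharedTestUtils;

public class ScratchDirExample {
    public static void main(String[] args) {
        /* Normally passed as -Dtestdestdir=...; set here only for the sketch. */
        System.setProperty(SharedTestUtils.DEST_DIR, "/tmp");
        /* Created (or emptied) under the test data directory. */
        File dir = SharedTestUtils.getNewDir("scratch-example");
        System.out.println("scratch dir: " + dir);
    }
}
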
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/util/test/TestEnv.java b/db-4.8.30/test/scr024/src/com/sleepycat/util/test/TestEnv.java
new file mode 100644
index 0000000..d065a43
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/util/test/TestEnv.java
@@ -0,0 +1,142 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.util.test;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+
+/**
+ * @author Mark Hayes
+ */
+public class TestEnv {
+
+ public static final TestEnv BDB;
+ public static final TestEnv CDB;
+ public static final TestEnv TXN;
+ static {
+ EnvironmentConfig config;
+
+ config = newEnvConfig();
+ BDB = new TestEnv("bdb", config);
+
+ if (DbCompat.CDB) {
+ config = newEnvConfig();
+ DbCompat.setInitializeCDB(config, true);
+ CDB = new TestEnv("cdb", config);
+ } else {
+ CDB = null;
+ }
+
+ config = newEnvConfig();
+ config.setTransactional(true);
+ DbCompat.setInitializeLocking(config, true);
+ TXN = new TestEnv("txn", config);
+ }
+
+ private static EnvironmentConfig newEnvConfig() {
+
+ EnvironmentConfig config = new EnvironmentConfig();
+ config.setTxnNoSync(Boolean.getBoolean(SharedTestUtils.NO_SYNC));
+ if (DbCompat.MEMORY_SUBSYSTEM) {
+ DbCompat.setInitializeCache(config, true);
+ }
+ return config;
+ }
+
+ public static final TestEnv[] ALL;
+ static {
+ if (DbCompat.CDB) {
+ ALL = new TestEnv[] { BDB, CDB, TXN };
+ } else {
+ ALL = new TestEnv[] { BDB, TXN };
+ }
+ }
+
+ private final String name;
+ private final EnvironmentConfig config;
+
+ protected TestEnv(String name, EnvironmentConfig config) {
+
+ this.name = name;
+ this.config = config;
+ }
+
+ public String getName() {
+
+ return name;
+ }
+
+ public EnvironmentConfig getConfig() {
+ return config;
+ }
+
+ void copyConfig(EnvironmentConfig copyToConfig) {
+ DbCompat.setInitializeCache
+ (copyToConfig, DbCompat.getInitializeCache(config));
+ DbCompat.setInitializeLocking
+ (copyToConfig, DbCompat.getInitializeLocking(config));
+ DbCompat.setInitializeCDB
+ (copyToConfig, DbCompat.getInitializeCDB(config));
+ copyToConfig.setTransactional(config.getTransactional());
+ }
+
+ public boolean isTxnMode() {
+
+ return config.getTransactional();
+ }
+
+ public boolean isCdbMode() {
+
+ return DbCompat.getInitializeCDB(config);
+ }
+
+ public Environment open(String testName)
+ throws IOException, DatabaseException {
+
+ return open(testName, true);
+ }
+
+ public Environment open(String testName, boolean create)
+ throws IOException, DatabaseException {
+
+ config.setAllowCreate(create);
+ /* OLDEST deadlock detection on DB matches the use of timeouts on JE.*/
+ DbCompat.setLockDetectModeOldest(config);
+ File dir = getDirectory(testName, create);
+ return newEnvironment(dir, config);
+ }
+
+ /**
+ * Is overridden in XACollectionTest.
+ * @throws FileNotFoundException from DB core.
+ */
+ protected Environment newEnvironment(File dir, EnvironmentConfig config)
+ throws DatabaseException, FileNotFoundException {
+
+ return new Environment(dir, config);
+ }
+
+ public File getDirectory(String testName) {
+ return getDirectory(testName, true);
+ }
+
+ public File getDirectory(String testName, boolean create) {
+ if (create) {
+ return SharedTestUtils.getNewDir(testName);
+ } else {
+ return SharedTestUtils.getExistingDir(testName);
+ }
+ }
+}
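
A hedged sketch of how a test might use one of the canned TestEnv configurations above; the test name is illustrative, and testdestdir is assumed to be set as described for SharedTestUtils.

import com.sleepycat.db.Environment;

import com.sleepycat.util.test.SharedTestUtils;
import com.sleepycat.util.test.TestEnv;

public class TestEnvExample {
    public static void main(String[] args) throws Exception {
        /* Illustrative only; normally passed as -Dtestdestdir=... */
        System.setProperty(SharedTestUtils.DEST_DIR, "/tmp");
        /* TXN is the transactional configuration; BDB and CDB are the others. */
        Environment env = TestEnv.TXN.open("testenv-example");  // fresh dir under testdestdir
        try {
            System.out.println("transactional: " + TestEnv.TXN.isTxnMode());
        } finally {
            env.close();
        }
    }
}
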
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/util/test/TxnTestCase.java b/db-4.8.30/test/scr024/src/com/sleepycat/util/test/TxnTestCase.java
new file mode 100644
index 0000000..249ad2c
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/util/test/TxnTestCase.java
@@ -0,0 +1,230 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.util.test;
+
+import java.io.File;
+import java.util.Enumeration;
+
+import junit.framework.TestSuite;
+
+import com.sleepycat.db.DatabaseException;
+import com.sleepycat.db.Environment;
+import com.sleepycat.db.EnvironmentConfig;
+import com.sleepycat.db.Transaction;
+import com.sleepycat.db.TransactionConfig;
+import com.sleepycat.db.util.DualTestCase;
+
+/**
+ * Permutes test cases over three transaction types: null (non-transactional),
+ * auto-commit, and user (explicit).
+ *
+ * <p>Overrides runTest, setUp and tearDown to open/close the environment and
+ * to set up protected members for use by test cases.</p>
+ *
+ * <p>If a subclass needs to override setUp or tearDown, the overridden method
+ * should call super.setUp or super.tearDown.</p>
+ *
+ * <p>When writing a test case based on this class, write it as if a user txn
+ * were always used: call txnBegin, txnCommit and txnAbort for all write
+ * operations. Use the isTransactional protected field for setup of a database
+ * config.</p>
+ */
+public abstract class TxnTestCase extends DualTestCase {
+
+ public static final String TXN_NULL = "txn-null";
+ public static final String TXN_AUTO = "txn-auto";
+ public static final String TXN_USER = "txn-user";
+
+ protected File envHome;
+ protected Environment env;
+ protected EnvironmentConfig envConfig;
+ protected String txnType;
+ protected boolean isTransactional;
+
+ /**
+ * Returns a txn test suite. If txnTypes is null, all three types are run.
+ */
+ public static TestSuite txnTestSuite(Class<?> testCaseClass,
+ EnvironmentConfig envConfig,
+ String[] txnTypes) {
+ if (txnTypes == null) {
+ txnTypes =
+ isReplicatedTest(testCaseClass) ?
+ new String[] { // Skip non-transactional tests
+ TxnTestCase.TXN_USER,
+ TxnTestCase.TXN_AUTO } :
+ new String[] { TxnTestCase.TXN_NULL,
+ TxnTestCase.TXN_USER,
+ TxnTestCase.TXN_AUTO } ;
+ }
+ if (envConfig == null) {
+ envConfig = new EnvironmentConfig();
+ envConfig.setAllowCreate(true);
+ }
+ TestSuite suite = new TestSuite();
+ for (int i = 0; i < txnTypes.length; i += 1) {
+ TestSuite baseSuite = new TestSuite(testCaseClass);
+ Enumeration e = baseSuite.tests();
+ while (e.hasMoreElements()) {
+ TxnTestCase test = (TxnTestCase) e.nextElement();
+ test.txnInit(envConfig, txnTypes[i]);
+ suite.addTest(test);
+ }
+ }
+ return suite;
+ }
+
+ private void txnInit(EnvironmentConfig envConfig, String txnType) {
+
+ this.envConfig = envConfig;
+ this.txnType = txnType;
+ isTransactional = (txnType != TXN_NULL);
+ }
+
+ @Override
+ public void setUp()
+ throws Exception {
+
+ super.setUp();
+ envHome = SharedTestUtils.getNewDir();
+ }
+
+ @Override
+ public void runTest()
+ throws Throwable {
+
+ openEnv();
+ super.runTest();
+ closeEnv();
+ }
+
+ @Override
+ public void tearDown()
+ throws Exception {
+
+ /* Set test name for reporting; cannot be done in the ctor or setUp. */
+ setName(txnType + ':' + getName());
+
+ super.tearDown();
+ env = null;
+
+ try {
+ SharedTestUtils.emptyDir(envHome);
+ } catch (Throwable e) {
+ System.out.println("tearDown: " + e);
+ }
+ }
+
+ /**
+ * Closes the environment and sets the env field to null.
+ * Used for closing and reopening the environment.
+ */
+ public void closeEnv()
+ throws DatabaseException {
+
+ if (env != null) {
+ close(env);
+ env = null;
+ }
+ }
+
+ /**
+ * Opens the environment based on the txnType for this test case.
+ * Used for closing and reopening the environment.
+ */
+ public void openEnv()
+ throws DatabaseException {
+
+ if (txnType == TXN_NULL) {
+ TestEnv.BDB.copyConfig(envConfig);
+ env = create(envHome, envConfig);
+ } else if (txnType == TXN_AUTO) {
+ TestEnv.TXN.copyConfig(envConfig);
+ env = create(envHome, envConfig);
+ } else if (txnType == TXN_USER) {
+ TestEnv.TXN.copyConfig(envConfig);
+ env = create(envHome, envConfig);
+ } else {
+ assert false;
+ }
+ }
+
+ /**
+ * Begin a txn if in TXN_USER mode; otherwise return null.
+ */
+ protected Transaction txnBegin()
+ throws DatabaseException {
+
+ return txnBegin(null, null);
+ }
+
+ /**
+ * Begin a txn if in TXN_USER mode; otherwise return null.
+ */
+ protected Transaction txnBegin(Transaction parentTxn,
+ TransactionConfig config)
+ throws DatabaseException {
+
+ /*
+ * Replicated tests need a user txn in place of the auto-commit (null)
+ * txn args to Database.get/put methods.
+ */
+ if (txnType == TXN_USER ||
+ (isReplicatedTest(getClass()) && txnType == TXN_AUTO)) {
+ return env.beginTransaction(parentTxn, config);
+ } else {
+ return null;
+ }
+ }
+
+ /**
+ * Begin a txn if in TXN_USER or TXN_AUTO mode; otherwise return null.
+ */
+ protected Transaction txnBeginCursor()
+ throws DatabaseException {
+
+ return txnBeginCursor(null, null);
+ }
+
+ /**
+ * Begin a txn if in TXN_USER or TXN_AUTO mode; otherwise return null.
+ */
+ protected Transaction txnBeginCursor(Transaction parentTxn,
+ TransactionConfig config)
+ throws DatabaseException {
+
+ if (txnType == TXN_USER || txnType == TXN_AUTO) {
+ return env.beginTransaction(parentTxn, config);
+ } else {
+ return null;
+ }
+ }
+
+ /**
+ * Commit a txn if non-null.
+ */
+ protected void txnCommit(Transaction txn)
+ throws DatabaseException {
+
+ if (txn != null) {
+ txn.commit();
+ }
+ }
+
+ /**
+ * Abort a txn if non-null.
+ */
+ protected void txnAbort(Transaction txn)
+ throws DatabaseException {
+
+ if (txn != null) {
+ txn.abort();
+ }
+ }
+}
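
To make the Javadoc guidance above concrete, here is a hedged sketch of a subclass: the suite() factory permutes the test over the three transaction types, and the test body is written as if a user txn were always in effect. The database name and entries are illustrative, and the example assumes the standard com.sleepycat.db Environment.openDatabase and Database.put signatures.

import junit.framework.Test;

import com.sleepycat.db.Database;
import com.sleepycat.db.DatabaseConfig;
import com.sleepycat.db.DatabaseEntry;
import com.sleepycat.db.DatabaseType;
import com.sleepycat.db.Transaction;
import com.sleepycat.util.test.TxnTestCase;

public class ExampleTxnTest extends TxnTestCase {

    public static Test suite() {
        /* null, null: run under txn-null, txn-auto and txn-user with a default config. */
        return txnTestSuite(ExampleTxnTest.class, null, null);
    }

    public void testPut() throws Exception {
        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(true);
        dbConfig.setType(DatabaseType.BTREE);
        dbConfig.setTransactional(isTransactional);   // per the Javadoc above

        Transaction txn = txnBegin();                 // null unless in txn-user mode
        Database db = env.openDatabase(txn, "example.db", null, dbConfig);
        db.put(txn, new DatabaseEntry(new byte[] { 1 }),
                    new DatabaseEntry(new byte[] { 2 }));
        txnCommit(txn);                               // no-op when txn is null
        db.close();
    }
}
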
diff --git a/db-4.8.30/test/scr024/src/com/sleepycat/util/test/UtfTest.java b/db-4.8.30/test/scr024/src/com/sleepycat/util/test/UtfTest.java
new file mode 100644
index 0000000..7b0ef53
--- /dev/null
+++ b/db-4.8.30/test/scr024/src/com/sleepycat/util/test/UtfTest.java
@@ -0,0 +1,163 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ *
+ * $Id$
+ */
+
+package com.sleepycat.util.test;
+
+import java.io.DataOutputStream;
+import java.util.Arrays;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import com.sleepycat.util.FastOutputStream;
+import com.sleepycat.util.UtfOps;
+
+/**
+ * @author Mark Hayes
+ */
+public class UtfTest extends TestCase {
+
+ public static void main(String[] args) {
+ junit.framework.TestResult tr =
+ junit.textui.TestRunner.run(suite());
+ if (tr.errorCount() > 0 ||
+ tr.failureCount() > 0) {
+ System.exit(1);
+ } else {
+ System.exit(0);
+ }
+ }
+
+ public static Test suite() {
+ TestSuite suite = new TestSuite(UtfTest.class);
+ return suite;
+ }
+
+ public UtfTest(String name) {
+
+ super(name);
+ }
+
+ @Override
+ public void setUp() {
+
+ SharedTestUtils.printTestName("UtfTest." + getName());
+ }
+
+ /**
+ * Compares the UtfOps implementation to the java.io.DataOutputStream
+ * (and by implication DataInputStream) implementation, character for
+ * character in the full Unicode set.
+ */
+ public void testMultibyte()
+ throws Exception {
+
+ char c = 0;
+ byte[] buf = new byte[10];
+ byte[] javaBuf = new byte[10];
+ char[] cArray = new char[1];
+ FastOutputStream javaBufStream = new FastOutputStream(javaBuf);
+ DataOutputStream javaOutStream = new DataOutputStream(javaBufStream);
+
+ try {
+ for (int cInt = Character.MIN_VALUE; cInt <= Character.MAX_VALUE;
+ cInt += 1) {
+ c = (char) cInt;
+ cArray[0] = c;
+ int byteLen = UtfOps.getByteLength(cArray);
+
+ javaBufStream.reset();
+ javaOutStream.writeUTF(new String(cArray));
+ int javaByteLen = javaBufStream.size() - 2;
+
+ if (byteLen != javaByteLen) {
+ fail("Character 0x" + Integer.toHexString(c) +
+ " UtfOps size " + byteLen +
+ " != JavaIO size " + javaByteLen);
+ }
+
+ Arrays.fill(buf, (byte) 0);
+ UtfOps.charsToBytes(cArray, 0, buf, 0, 1);
+
+ if (byteLen == 1 && buf[0] == (byte) 0xff) {
+ fail("Character 0x" + Integer.toHexString(c) +
+ " was encoded as FF, which is reserved for null");
+ }
+
+ for (int i = 0; i < byteLen; i += 1) {
+ if (buf[i] != javaBuf[i + 2]) {
+ fail("Character 0x" + Integer.toHexString(c) +
+ " byte offset " + i +
+ " UtfOps byte " + Integer.toHexString(buf[i]) +
+ " != JavaIO byte " +
+ Integer.toHexString(javaBuf[i + 2]));
+ }
+ }
+
+ int charLen = UtfOps.getCharLength(buf, 0, byteLen);
+ if (charLen != 1) {
+ fail("Character 0x" + Integer.toHexString(c) +
+ " UtfOps char len " + charLen +
+ " but should be one");
+ }
+
+ cArray[0] = (char) 0;
+ int len = UtfOps.bytesToChars(buf, 0, cArray, 0, byteLen,
+ true);
+ if (len != byteLen) {
+ fail("Character 0x" + Integer.toHexString(c) +
+ " UtfOps bytesToChars(w/byteLen) len " + len +
+ " but should be " + byteLen);
+ }
+
+ if (cArray[0] != c) {
+ fail("Character 0x" + Integer.toHexString(c) +
+ " UtfOps bytesToChars(w/byteLen) char " +
+ Integer.toHexString(cArray[0]));
+ }
+
+ cArray[0] = (char) 0;
+ len = UtfOps.bytesToChars(buf, 0, cArray, 0, 1, false);
+ if (len != byteLen) {
+ fail("Character 0x" + Integer.toHexString(c) +
+ " UtfOps bytesToChars(w/charLen) len " + len +
+ " but should be " + byteLen);
+ }
+
+ if (cArray[0] != c) {
+ fail("Character 0x" + Integer.toHexString(c) +
+ " UtfOps bytesToChars(w/charLen) char " +
+ Integer.toHexString(cArray[0]));
+ }
+
+ String s = new String(cArray, 0, 1);
+ byte[] sBytes = UtfOps.stringToBytes(s);
+ if (sBytes.length != byteLen) {
+ fail("Character 0x" + Integer.toHexString(c) +
+ " UtfOps stringToBytes() len " + sBytes.length +
+ " but should be " + byteLen);
+ }
+
+ for (int i = 0; i < byteLen; i += 1) {
+ if (sBytes[i] != javaBuf[i + 2]) {
+ fail("Character 0x" + Integer.toHexString(c) +
+ " byte offset " + i +
+ " UtfOps byte " + Integer.toHexString(sBytes[i]) +
+ " != JavaIO byte " +
+ Integer.toHexString(javaBuf[i + 2]));
+ }
+ }
+ }
+ } catch (Exception e) {
+ System.out.println("Character 0x" + Integer.toHexString(c) +
+ " exception occurred");
+ throw e;
+ }
+ }
+}
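
One detail worth calling out in the test above: DataOutputStream.writeUTF prepends a two-byte length before the modified-UTF-8 payload, which is why the comparisons start at javaBuf[i + 2] and subtract 2 from the stream size. A minimal standalone illustration (standard java.io only; the class name is illustrative):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;

public class WriteUtfPrefixDemo {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new DataOutputStream(bytes).writeUTF("A");
        byte[] out = bytes.toByteArray();
        /* out[0] and out[1] hold the unsigned-short payload length (0, 1);
           the encoded character starts at out[2] (0x41 for 'A'). */
        System.out.printf("total=%d prefix=%d,%d first=0x%02x%n",
            out.length, out[0], out[1], out[2]);
    }
}
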
diff --git a/db-4.8.30/test/scr025/TestMulti.cpp b/db-4.8.30/test/scr025/TestMulti.cpp
new file mode 100644
index 0000000..045380c
--- /dev/null
+++ b/db-4.8.30/test/scr025/TestMulti.cpp
@@ -0,0 +1,206 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ */
+
+#include "db_cxx.h"
+#include "stdlib.h"
+
+void test1()
+{
+ int numberOfKeysToWrite= 10000;
+ Db db(0,DB_CXX_NO_EXCEPTIONS);
+ db.set_pagesize(512);
+ int err= db.open(0, "test1.db", 0, DB_BTREE, DB_CREATE, 0);
+ {
+ int i= 0;
+ Dbt key(&i,sizeof(i));
+ Dbt data(&i,sizeof(i));
+ for(;i<numberOfKeysToWrite;++i)
+ {
+ db.put(0,&key,&data,0);
+ }
+ }
+
+ {
+ Dbc *dbc;
+ err= db.cursor(0,&dbc,0);
+
+ char *check= (char*)calloc(numberOfKeysToWrite,1);
+ char buffer[8192];
+ int numberOfKeysRead= 0;
+ Dbt multikey(&numberOfKeysRead,sizeof(numberOfKeysRead));
+ Dbt multidata(&buffer,sizeof(buffer));
+ multidata.set_flags(DB_DBT_USERMEM);
+ multidata.set_ulen(sizeof(buffer));
+ err= 0;
+ while(err==0)
+ {
+ err= dbc->get(&multikey,&multidata,DB_NEXT|DB_MULTIPLE_KEY);
+ if(err==0)
+ {
+ Dbt key, data;
+ DbMultipleKeyDataIterator i(multidata);
+ while(err==0 && i.next(key,data))
+ {
+ int actualKey= *((int*)key.get_data());
+ int actualData= *((int*)data.get_data());
+ if(actualKey!=actualData)
+ {
+ std::cout << "Error: key/data mismatch. " << actualKey << "!=" << actualData << std::endl;
+ err= -1;
+ }
+ else
+ {
+ check[actualKey]++;
+ }
+ numberOfKeysRead++;
+ }
+ } else if(err!=DB_NOTFOUND)
+ std::cout << "Error: dbc->get: " << db_strerror(err) << std::endl;
+ }
+ if(numberOfKeysRead!=numberOfKeysToWrite)
+ {
+ std::cout << "Error: key count mismatch. " << numberOfKeysRead << "!=" << numberOfKeysToWrite << std::endl;
+ }
+ for(int n=0;n<numberOfKeysToWrite;++n)
+ {
+ if(check[n]!=1)
+ {
+ std::cout << "Error: key " << n << " was written to the database, but not read back." << std::endl;
+ }
+ }
+ free(check);
+ dbc->close();
+ }
+
+ db.close(0);
+}
+
+void test2()
+{
+ int numberOfKeysToWrite= 10000;
+ Db db(0,DB_CXX_NO_EXCEPTIONS);
+ db.set_flags(DB_DUP);
+ db.set_pagesize(512);
+ int err= db.open(0, "test2.db", 0, DB_BTREE, DB_CREATE, 0);
+
+ {
+ int i= 0;
+ int k= 0;
+ Dbt key(&k,sizeof(k));
+ Dbt data(&i,sizeof(i));
+ for(;i<numberOfKeysToWrite;++i)
+ {
+ err= db.put(0,&key,&data,0);
+ }
+ }
+
+ {
+ Dbc *dbc;
+ err= db.cursor(0,&dbc,0);
+
+ char buffer[8192];
+ int numberOfKeysRead= 0;
+ Dbt multikey(&numberOfKeysRead,sizeof(numberOfKeysRead));
+ Dbt multidata(&buffer,sizeof(buffer));
+ multidata.set_flags(DB_DBT_USERMEM);
+ multidata.set_ulen(sizeof(buffer));
+ err= 0;
+ while(err==0)
+ {
+ err= dbc->get(&multikey,&multidata,DB_NEXT|DB_MULTIPLE);
+ if(err==0)
+ {
+ Dbt data;
+ DbMultipleDataIterator i(multidata);
+ while(err==0 && i.next(data))
+ {
+ int actualData= *((int*)data.get_data());
+ if(numberOfKeysRead!=actualData)
+ {
+ std::cout << "Error: key/data mismatch. " << numberOfKeysRead << "!=" << actualData << std::endl;
+ err= -1;
+ }
+ numberOfKeysRead++;
+ }
+ } else if(err!=DB_NOTFOUND)
+ std::cout << "Error: dbc->get: " << db_strerror(err) << std::endl;
+ }
+ if(numberOfKeysRead!=numberOfKeysToWrite)
+ {
+ std::cout << "Error: key count mismatch. " << numberOfKeysRead << "!=" << numberOfKeysToWrite << std::endl;
+ }
+ dbc->close();
+ }
+ db.close(0);
+}
+
+void test3()
+{
+ int numberOfKeysToWrite= 10000;
+ Db db(0,DB_CXX_NO_EXCEPTIONS);
+ db.set_pagesize(512);
+ int err= db.open(0, "test3.db", 0, DB_RECNO, DB_CREATE, 0);
+
+ {
+ int i= 0;
+ Dbt key;
+ Dbt data(&i,sizeof(i));
+ for(;i<numberOfKeysToWrite;++i)
+ {
+ err= db.put(0,&key,&data,DB_APPEND);
+ }
+ }
+
+ {
+ Dbc *dbc;
+ err= db.cursor(0,&dbc,0);
+
+ char buffer[8192];
+ int numberOfKeysRead= 0;
+ Dbt multikey(&numberOfKeysRead,sizeof(numberOfKeysRead));
+ Dbt multidata(&buffer,sizeof(buffer));
+ multidata.set_flags(DB_DBT_USERMEM);
+ multidata.set_ulen(sizeof(buffer));
+ err= 0;
+ while(err==0)
+ {
+ err= dbc->get(&multikey,&multidata,DB_NEXT|DB_MULTIPLE_KEY);
+ if(err==0)
+ {
+ u_int32_t recno= 0;
+ Dbt data;
+ DbMultipleRecnoDataIterator i(multidata);
+ while(err==0 && i.next(recno,data))
+ {
+ int actualData= *((int*)data.get_data());
+ if(recno!=actualData+1)
+ {
+ std::cout << "Error: recno/data mismatch. " << recno << "!=" << actualData << "+1" << std::endl;
+ err= -1;
+ }
+ numberOfKeysRead++;
+ }
+ } else if(err!=DB_NOTFOUND)
+ std::cout << "Error: dbc->get: " << db_strerror(err) << std::endl;
+ }
+ if(numberOfKeysRead!=numberOfKeysToWrite)
+ {
+ std::cout << "Error: key count mismatch. " << numberOfKeysRead << "!=" << numberOfKeysToWrite << std::endl;
+ }
+ dbc->close();
+ }
+
+ db.close(0);
+}
+
+int main()
+{
+ test1();
+ test2();
+ test3();
+ return (0);
+}
+
diff --git a/db-4.8.30/test/scr025/chk.cxxmulti b/db-4.8.30/test/scr025/chk.cxxmulti
new file mode 100644
index 0000000..26d9393
--- /dev/null
+++ b/db-4.8.30/test/scr025/chk.cxxmulti
@@ -0,0 +1,69 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure that regression tests for C++ run.
+
+TEST_CXX_SRCDIR=../test/scr025 # must be a relative directory
+
+# All paths must be relative to a subdirectory of the build directory
+LIBS="-L.. -ldb -ldb_cxx"
+CXXFLAGS="-I.. -I../../dbinc"
+
+[ `uname` = "Linux" ] && LIBS="$LIBS -lpthread"
+
+# Test must be run from a local build directory, not from a test
+# directory.
+cd ..
+[ -f db_config.h ] || {
+	echo 'FAIL: chk.cxxmulti must be run from a local build directory.'
+ exit 1
+}
+[ -f libdb.a ] || make libdb.a || {
+ echo 'FAIL: unable to build libdb.a'
+ exit 1
+}
+[ -f libdb_cxx.a ] || make libdb_cxx.a || {
+ echo 'FAIL: unable to build libdb_cxx.a'
+ exit 1
+}
+CXX=`sed -e '/^CXX=/!d' -e 's/^CXX=//' -e 's/.*mode=compile *//' Makefile`
+echo " ====== cxx tests using $CXX"
+testnames=`cd $TEST_CXX_SRCDIR; ls *.cpp | sed -e 's/\.cpp$//'`
+
+for testname in $testnames; do
+ if grep -x $testname $TEST_CXX_SRCDIR/ignore > /dev/null; then
+ echo " **** cxx test $testname ignored"
+ continue
+ fi
+
+ echo " ==== cxx test $testname"
+ rm -rf TESTCXX; mkdir TESTCXX
+ cd ./TESTCXX
+ testprefix=../$TEST_CXX_SRCDIR/$testname
+
+ ${CXX} ${CXXFLAGS} -o $testname $testprefix.cpp ${LIBS} > ../$testname.compileout 2>&1 || {
+ echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
+ exit 1
+ }
+ rm -f ../$testname.compileout
+ infile=$testprefix.testin
+ [ -f $infile ] || infile=/dev/null
+ goodoutfile=$testprefix.testout
+ [ -f $goodoutfile ] || goodoutfile=/dev/null
+ gooderrfile=$testprefix.testerr
+ [ -f $gooderrfile ] || gooderrfile=/dev/null
+ ./$testname <$infile >../$testname.out 2>../$testname.err
+ cmp ../$testname.out $goodoutfile > /dev/null || {
+ echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
+ exit 1
+ }
+ cmp ../$testname.err $gooderrfile > /dev/null || {
+ echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
+ exit 1
+ }
+ cd ..
+ rm -f $testname.err $testname.out
+done
+rm -rf TESTCXX
+exit 0
diff --git a/db-4.8.30/test/scr025/ignore b/db-4.8.30/test/scr025/ignore
new file mode 100644
index 0000000..bcd98b5
--- /dev/null
+++ b/db-4.8.30/test/scr025/ignore
@@ -0,0 +1,4 @@
+#
+# $Id$
+#
+# A list of tests to ignore
diff --git a/db-4.8.30/test/scr026/chk.method b/db-4.8.30/test/scr026/chk.method
new file mode 100644
index 0000000..20365a2
--- /dev/null
+++ b/db-4.8.30/test/scr026/chk.method
@@ -0,0 +1,108 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check that DB doesn't call DB or DB_ENV methods internally.
+
+d=../..
+
+t=__1
+
+echo ========================================================
+echo "Check that DB doesn't call DB or DB_ENV methods internally."
+echo ========================================================
+m=`grep '(\*[a-z][_a-z]*)' $d/dbinc/db.in |
+ sed -e 's/^[^(]*(\*//' \
+ -e 's/).*//' \
+ -e '/am_bulk/d' \
+ -e '/am_close/d' \
+ -e '/am_del/d' \
+ -e '/am_destroy/d' \
+ -e '/am_get/d' \
+ -e '/am_put/d' \
+ -e '/am_remove/d' \
+ -e '/am_rename/d' \
+ -e '/am_writelock/d' \
+ -e '/app_dispatch/d' \
+ -e '/db_append_recno/d' \
+ -e '/db_errcall/d' \
+ -e '/db_event_func/d' \
+ -e '/db_feedback/d' \
+ -e '/db_free/d' \
+ -e '/db_malloc/d' \
+ -e '/db_paniccall/d' \
+ -e '/db_realloc/d' \
+ -e '/dbt_usercopy/d' \
+ -e '/dup_compare/d' \
+ -e '/s_callback/d' |
+ sort -u`
+
+(cd $d && for i in $m; do
+ #echo "$i..." > /dev/stderr
+ egrep -- "->$i\(" */*.[ch]
+done) |
+sed \
+ -e '/Wrapper function for/d' \
+ -e '/\/db.h:/d' \
+ -e '/^[^:]*:[ ]*\*[ ]/d' \
+ -e '/^common\/db_getlong.c:/d' \
+ -e '/^common\/util_cache.c:/d' \
+ -e '/^common\/util_log.c:/d' \
+ -e '/^common\/util_sig.c:/d' \
+ -e '/^db185\//d' \
+ -e '/^db_archive\//d' \
+ -e '/^db_checkpoint\//d' \
+ -e '/^db_codegen\//d' \
+ -e '/^db_deadlock\//d' \
+ -e '/^db_dump185\//d' \
+ -e '/^db_dump\//d' \
+ -e '/^db_hotbackup\//d' \
+ -e '/^db_load\//d' \
+ -e '/^db_printlog\//d' \
+ -e '/^db_recover\//d' \
+ -e '/^db_stat\//d' \
+ -e '/^db_sql\//d' \
+ -e '/^db_upgrade\//d' \
+ -e '/^db_verify\//d' \
+ -e '/^dbm\//d' \
+ -e '/^examples_c\//d' \
+ -e '/^examples_cxx\//d' \
+ -e '/^examples_java\//d' \
+ -e '/^hsearch\//d' \
+ -e '/^libdb_java\//d' \
+ -e '/^libdb_csharp\//d' \
+ -e '/^mod_db4\//d' \
+ -e '/^mutex\/tm.c:/d' \
+ -e '/^php_db4\//d' \
+ -e '/^stl\//d' \
+ -e '/^tcl\//d' \
+ -e '/^test\//d' \
+ -e '/^test_erlang\//d' \
+ -e '/^test_perf\//d' \
+ -e '/^test_purify\//d' \
+ -e '/^test_repmgr\//d' \
+ -e '/^test_server\//d' \
+ -e '/^test_thread\//d' \
+ -e '/^test_vxworks\//d' \
+ -e '/^xa\//d' \
+ -e '/closeme->close() is a wrapper;/d' \
+ -e '/crypto.c.*db_cipher->close/d' \
+ -e '/db_err.c:.*dbenv->db_msgcall(dbenv, buf);/d' \
+ -e '/db_iface.c:.*(txn->commit(txn, nosync ? DB_TXN_NOSYNC : 0));/d' \
+ -e '/db_iface.c:.*if ((t_ret = txn->abort(txn)) != 0)/d' \
+ -e '/db_iface.c:.*return (dbenv->txn_begin(dbenv, NULL, txnidp, 0));/d' \
+ -e '/db_iface.c:.*return (dbp->get(dbp, txn, key, &data, flags));/d' \
+ -e '/dbenv->is_alive(/d' \
+ -e '/dbenv->thread_id(/d' \
+ -e '/dbenv->thread_id_string(/d' \
+ -e '/rep_util.c:.*ret = dbenv->rep_send(/d' \
+    -e '/test_mutex.c:/d' \
+>$t
+
+test -s $t && {
+ cat $t
+ echo "FAIL: found DB/DB_ENV method calls DB library."
+ exit 1
+}
+
+exit 0
diff --git a/db-4.8.30/test/scr027/chk.javas b/db-4.8.30/test/scr027/chk.javas
new file mode 100644
index 0000000..0838cdf
--- /dev/null
+++ b/db-4.8.30/test/scr027/chk.javas
@@ -0,0 +1,10 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure that the java code samples in the documents build.
+
+
+# There isn't any Java sample code left in the Reference Guide. This test is
+# left as a placeholder so I don't have to renumber all of the other tests.
+exit 0
diff --git a/db-4.8.30/test/scr028/chk.rtc b/db-4.8.30/test/scr028/chk.rtc
new file mode 100644
index 0000000..62b0b44
--- /dev/null
+++ b/db-4.8.30/test/scr028/chk.rtc
@@ -0,0 +1,29 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Build a program that calls the run-time API configuration functions.
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+CINC=-I..
+[ `uname` = "Linux" ] && CINC="$CINC -pthread"
+
+if cc -g -Wall $CINC t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/db-4.8.30/test/scr028/t.c b/db-4.8.30/test/scr028/t.c
new file mode 100644
index 0000000..059c46c
--- /dev/null
+++ b/db-4.8.30/test/scr028/t.c
@@ -0,0 +1,95 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ */
+
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+/* Added to get a clean compile on Linux blade servers; otherwise pread is undefined. */
+#ifdef __linux__
+#define __USE_UNIX98
+#endif
+#include <unistd.h>
+
+#include "db.h"
+
+#define E(api, func, name) { \
+ if ((ret = api(func)) != 0) { \
+ fprintf(stderr, "%s: %s", name, db_strerror(ret)); \
+ return (1); \
+ } \
+}
+
+#define F(api, func1, func2, name) { \
+ if ((ret = api(func1, func2)) != 0) { \
+ fprintf(stderr, "%s: %s", name, db_strerror(ret)); \
+ return (1); \
+ } \
+}
+
+void
+dirfree(char **namesp, int cnt)
+{ return; }
+int
+dirlist(const char *dir, char ***namesp, int *cntp)
+{ return (0); }
+int
+exists(const char *path, int *isdirp)
+{ return (0); }
+int
+ioinfo(const char *path,
+ int fd, u_int32_t *mbytesp, u_int32_t *bytesp, u_int32_t *iosizep)
+{ return (0); }
+int
+file_map(DB_ENV *dbenv, char *path, size_t len, int is_readonly, void **addr)
+{ return (0); }
+int
+region_map(DB_ENV *dbenv, char *path, size_t len, int *is_create, void **addr)
+{ return (0); }
+int
+seek(int fd, off_t offset, int whence)
+{ return (0); }
+int
+local_sleep(u_long seconds, u_long microseconds)
+{ return (0); }
+int
+unmap(DB_ENV *dbenv, void *addr)
+{ return (0); }
+
+int
+main(int argc, char *argv[])
+{
+ int ret;
+
+ E(db_env_set_func_close, close, "close");
+ E(db_env_set_func_dirfree, dirfree, "dirfree");
+ E(db_env_set_func_dirlist, dirlist, "dirlist");
+ E(db_env_set_func_exists, exists, "exists");
+ F(db_env_set_func_file_map, file_map, unmap, "file map");
+ E(db_env_set_func_free, free, "free");
+ E(db_env_set_func_fsync, fsync, "fsync");
+ E(db_env_set_func_ftruncate, ftruncate, "ftruncate");
+ E(db_env_set_func_ioinfo, ioinfo, "ioinfo");
+ E(db_env_set_func_malloc, malloc, "malloc");
+ E(db_env_set_func_open, open, "open");
+ E(db_env_set_func_pread, pread, "pread");
+ E(db_env_set_func_pwrite, pwrite, "pwrite");
+ E(db_env_set_func_read, read, "read");
+ E(db_env_set_func_realloc, realloc, "realloc");
+ F(db_env_set_func_region_map, region_map, unmap, "region map");
+ E(db_env_set_func_rename, rename, "rename");
+ E(db_env_set_func_seek, seek, "seek");
+ E(db_env_set_func_unlink, unlink, "unlink");
+ E(db_env_set_func_write, write, "write");
+ E(db_env_set_func_yield, local_sleep, "sleep/yield");
+
+ return (0);
+}
diff --git a/db-4.8.30/test/scr029/chk.get b/db-4.8.30/test/scr029/chk.get
new file mode 100644
index 0000000..ce56e9a
--- /dev/null
+++ b/db-4.8.30/test/scr029/chk.get
@@ -0,0 +1,29 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Build a program that calls the getters.
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+CINC=-I..
+[ `uname` = "Linux" ] && CINC="$CINC -pthread"
+
+if cc -g -Wall $CINC t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/db-4.8.30/test/scr029/t.c b/db-4.8.30/test/scr029/t.c
new file mode 100644
index 0000000..22a61d6
--- /dev/null
+++ b/db-4.8.30/test/scr029/t.c
@@ -0,0 +1,209 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002-2009 Oracle. All rights reserved.
+ */
+
+#include <sys/types.h>
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db.h"
+
+#define ENV { \
+ if (dbenv != NULL) \
+ assert(dbenv->close(dbenv, 0) == 0); \
+ assert(db_env_create(&dbenv, 0) == 0); \
+ dbenv->set_errfile(dbenv, stderr); \
+}
+
+int
+main()
+{
+ const u_int8_t *lk_conflicts;
+ DB_ENV *dbenv;
+ db_timeout_t timeout;
+ u_int32_t a, b, c, v;
+ int nmodes, lk_modes;
+ u_int8_t conflicts[40];
+
+ dbenv = NULL;
+
+ /* tx_max: NOT reset at run-time. */
+ system("rm -rf TESTDIR; mkdir TESTDIR");
+ ENV
+ assert(dbenv->set_tx_max(dbenv, 37) == 0);
+ assert(dbenv->open(dbenv,
+ "TESTDIR", DB_CREATE | DB_INIT_TXN, 0666) == 0);
+ assert(dbenv->get_tx_max(dbenv, &v) == 0);
+ assert(v == 37);
+ ENV
+ assert(dbenv->set_tx_max(dbenv, 63) == 0);
+ assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0);
+ assert(dbenv->get_tx_max(dbenv, &v) == 0);
+ assert(v == 37);
+
+ /* lg_max: reset at run-time. */
+ system("rm -rf TESTDIR; mkdir TESTDIR");
+ ENV
+ assert(dbenv->set_lg_max(dbenv, 37 * 1024 * 1024) == 0);
+ assert(dbenv->open(dbenv,
+ "TESTDIR", DB_CREATE | DB_INIT_LOG, 0666) == 0);
+ assert(dbenv->get_lg_max(dbenv, &v) == 0);
+ assert(v == 37 * 1024 * 1024);
+ ENV
+ assert(dbenv->set_lg_max(dbenv, 63 * 1024 * 1024) == 0);
+ assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0);
+ assert(dbenv->get_lg_max(dbenv, &v) == 0);
+ assert(v == 63 * 1024 * 1024);
+
+ /* lg_bsize: NOT reset at run-time. */
+ system("rm -rf TESTDIR; mkdir TESTDIR");
+ ENV
+ assert(dbenv->set_lg_bsize(dbenv, 37 * 1024) == 0);
+ assert(dbenv->open(dbenv,
+ "TESTDIR", DB_CREATE | DB_INIT_LOG, 0666) == 0);
+ assert(dbenv->get_lg_bsize(dbenv, &v) == 0);
+ assert(v == 37 * 1024);
+ ENV
+ assert(dbenv->set_lg_bsize(dbenv, 63 * 1024) == 0);
+ assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0);
+ assert(dbenv->get_lg_bsize(dbenv, &v) == 0);
+ assert(v == 37 * 1024);
+
+ /* lg_regionmax: NOT reset at run-time. */
+ system("rm -rf TESTDIR; mkdir TESTDIR");
+ ENV
+ assert(dbenv->set_lg_regionmax(dbenv, 137 * 1024) == 0);
+ assert(dbenv->open(dbenv,
+ "TESTDIR", DB_CREATE | DB_INIT_LOG, 0666) == 0);
+ assert(dbenv->get_lg_regionmax(dbenv, &v) == 0);
+ assert(v == 137 * 1024);
+ ENV
+ assert(dbenv->set_lg_regionmax(dbenv, 163 * 1024) == 0);
+ assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0);
+ assert(dbenv->get_lg_regionmax(dbenv, &v) == 0);
+ assert(v == 137 * 1024);
+
+ /* lk_get_lk_conflicts: NOT reset at run-time. */
+ system("rm -rf TESTDIR; mkdir TESTDIR");
+ ENV
+ memset(conflicts, 'a', sizeof(conflicts));
+ nmodes = 6;
+ assert(dbenv->set_lk_conflicts(dbenv, conflicts, nmodes) == 0);
+ assert(dbenv->open(dbenv,
+ "TESTDIR", DB_CREATE | DB_INIT_LOCK, 0666) == 0);
+ assert(dbenv->get_lk_conflicts(dbenv, &lk_conflicts, &lk_modes) == 0);
+ assert(lk_conflicts[0] == 'a');
+ assert(lk_modes == 6);
+ ENV
+ memset(conflicts, 'b', sizeof(conflicts));
+ nmodes = 8;
+ assert(dbenv->set_lk_conflicts(dbenv, conflicts, nmodes) == 0);
+ assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0);
+ assert(dbenv->get_lk_conflicts(dbenv, &lk_conflicts, &lk_modes) == 0);
+ assert(lk_conflicts[0] == 'a');
+ assert(lk_modes == 6);
+
+ /* lk_detect: NOT reset at run-time. */
+ system("rm -rf TESTDIR; mkdir TESTDIR");
+ ENV
+ assert(dbenv->set_lk_detect(dbenv, DB_LOCK_MAXLOCKS) == 0);
+ assert(dbenv->open(dbenv,
+ "TESTDIR", DB_CREATE | DB_INIT_LOCK, 0666) == 0);
+ assert(dbenv->get_lk_detect(dbenv, &v) == 0);
+ assert(v == DB_LOCK_MAXLOCKS);
+ ENV
+ assert(dbenv->set_lk_detect(dbenv, DB_LOCK_DEFAULT) == 0);
+ assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0);
+ assert(dbenv->get_lk_detect(dbenv, &v) == 0);
+ assert(v == DB_LOCK_MAXLOCKS);
+
+ /* lk_max_locks: NOT reset at run-time. */
+ system("rm -rf TESTDIR; mkdir TESTDIR");
+ ENV
+ assert(dbenv->set_lk_max_locks(dbenv, 37) == 0);
+ assert(dbenv->open(dbenv,
+ "TESTDIR", DB_CREATE | DB_INIT_LOCK, 0666) == 0);
+ assert(dbenv->get_lk_max_locks(dbenv, &v) == 0);
+ assert(v == 37);
+ ENV
+ assert(dbenv->set_lk_max_locks(dbenv, 63) == 0);
+ assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0);
+ assert(dbenv->get_lk_max_locks(dbenv, &v) == 0);
+ assert(v == 37);
+
+ /* lk_max_lockers: NOT reset at run-time. */
+ system("rm -rf TESTDIR; mkdir TESTDIR");
+ ENV
+ assert(dbenv->set_lk_max_lockers(dbenv, 37) == 0);
+ assert(dbenv->open(dbenv,
+ "TESTDIR", DB_CREATE | DB_INIT_LOCK, 0666) == 0);
+ assert(dbenv->get_lk_max_lockers(dbenv, &v) == 0);
+ assert(v == 37);
+ ENV
+ assert(dbenv->set_lk_max_lockers(dbenv, 63) == 0);
+ assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0);
+ assert(dbenv->get_lk_max_lockers(dbenv, &v) == 0);
+ assert(v == 37);
+
+ /* lk_max_objects: NOT reset at run-time. */
+ system("rm -rf TESTDIR; mkdir TESTDIR");
+ ENV
+ assert(dbenv->set_lk_max_objects(dbenv, 37) == 0);
+ assert(dbenv->open(dbenv,
+ "TESTDIR", DB_CREATE | DB_INIT_LOCK, 0666) == 0);
+ assert(dbenv->get_lk_max_objects(dbenv, &v) == 0);
+ assert(v == 37);
+ ENV
+ assert(dbenv->set_lk_max_objects(dbenv, 63) == 0);
+ assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0);
+ assert(dbenv->get_lk_max_objects(dbenv, &v) == 0);
+ assert(v == 37);
+
+ /* lock timeout: reset at run-time. */
+ system("rm -rf TESTDIR; mkdir TESTDIR");
+ ENV
+ assert(dbenv->set_timeout(dbenv, 37, DB_SET_LOCK_TIMEOUT) == 0);
+ assert(dbenv->open(dbenv,
+ "TESTDIR", DB_CREATE | DB_INIT_LOCK, 0666) == 0);
+ assert(dbenv->get_timeout(dbenv, &timeout, DB_SET_LOCK_TIMEOUT) == 0);
+ assert(timeout == 37);
+ ENV
+ assert(dbenv->set_timeout(dbenv, 63, DB_SET_LOCK_TIMEOUT) == 0);
+ assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0);
+ assert(dbenv->get_timeout(dbenv, &timeout, DB_SET_LOCK_TIMEOUT) == 0);
+ assert(timeout == 63);
+
+ /* txn timeout: reset at run-time. */
+ system("rm -rf TESTDIR; mkdir TESTDIR");
+ ENV
+ assert(dbenv->set_timeout(dbenv, 37, DB_SET_TXN_TIMEOUT) == 0);
+ assert(dbenv->open(dbenv,
+ "TESTDIR", DB_CREATE | DB_INIT_LOCK, 0666) == 0);
+ assert(dbenv->get_timeout(dbenv, &timeout, DB_SET_TXN_TIMEOUT) == 0);
+ assert(timeout == 37);
+ ENV
+ assert(dbenv->set_timeout(dbenv, 63, DB_SET_TXN_TIMEOUT) == 0);
+ assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0);
+ assert(dbenv->get_timeout(dbenv, &timeout, DB_SET_TXN_TIMEOUT) == 0);
+ assert(timeout == 63);
+
+ /* cache size: NOT reset at run-time. */
+ system("rm -rf TESTDIR; mkdir TESTDIR");
+ ENV
+ assert(dbenv->set_cachesize(dbenv, 1, 37, 3) == 0);
+ assert(dbenv->open(dbenv,
+ "TESTDIR", DB_CREATE | DB_INIT_MPOOL, 0666) == 0);
+ assert(dbenv->get_cachesize(dbenv, &a, &b, &c) == 0);
+ assert(a == 1 && b == 37 && c == 3);
+ ENV
+ assert(dbenv->set_cachesize(dbenv, 2, 63, 1) == 0);
+ assert(dbenv->open(dbenv, "TESTDIR", DB_JOINENV, 0666) == 0);
+ assert(dbenv->get_cachesize(dbenv, &a, &b, &c) == 0);
+ assert(a == 1 && b == 37 && c == 3);
+
+ return (0);
+}
diff --git a/db-4.8.30/test/scr030/chk.build b/db-4.8.30/test/scr030/chk.build
new file mode 100644
index 0000000..b7c5599
--- /dev/null
+++ b/db-4.8.30/test/scr030/chk.build
@@ -0,0 +1,124 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Build DB with a variety of configuration options and verify that each build works.
+
+trap 'rm -rf scr030 ; exit 0' 0
+trap 'rm -rf scr030 ; exit 1' 1 2 3 13 15
+
+[ -d ../../dist ] || {
+ echo 'FAIL: unable to find top-level dist directory'
+ exit 1
+}
+
+# Flags to build Java.
+JAVA_INC=/usr/local/diablo-jdk1.5.0/include
+JAVA_FLAGS="-I$JAVA_INC -I$JAVA_INC/linux -I$JAVA_INC/freebsd"
+
+# Configure and build.
+# $1: config flags
+config()
+{
+ (echo `date`; echo "run: $1: $dir") | tee CONFIGURATION
+
+ ../../../dist/configure $1 > config.OUT 2>&1
+ if test $? -ne 0; then
+ echo "$i: FAILED in configure"
+ return 1
+ fi
+
+	if echo "$1" | grep disable-statistics > /dev/null; then
+ echo '#define __TEST_DB_NO_STATISTICS 1' >> db_config.h
+ fi
+
+ (echo /^CFLAGS=/ &&
+ # Configure gcc to complain about everything, and make warnings fatal
+ # errors.
+ echo \
+ 's/-c /-c -W -Werror -Wall -Wpointer-arith -Wmissing-prototypes /' &&
+ # Warnings are fatal errors, so don't set gcc warning flags for files
+ # where we can't avoid warnings.
+ echo '/^db_server_svc.*: .*db_server_svc.c$/' &&
+ echo '+1s/\$(CFLAGS)/-c \$(CPPFLAGS)/' &&
+ echo '/^db_server_util.*: .*db_server_util.c$/' &&
+ echo '+1s/\$(CFLAGS)/-c \$(CPPFLAGS)/' &&
+ echo '/^db_server_xdr.*: .*db_server_xdr.c$/' &&
+ echo '+1s/\$(CFLAGS)/-c \$(CPPFLAGS)/' &&
+ echo '/^gen_db_server.*: .*gen_db_server.c$/' &&
+ echo '+1s/\$(CFLAGS)/-c \$(CPPFLAGS)/' &&
+ echo '/^db_java_wrap.*: .*db_java_wrap.c$/' &&
+ echo '+1s/\$(CFLAGS)/-c \$(CPPFLAGS)/' &&
+ echo '/^tcl_db_pkg.*: .*tcl_db_pkg.c$/' &&
+ echo '+1s/\$(CFLAGS)/-c \$(CPPFLAGS)/' &&
+ echo w &&
+ echo q) | ed Makefile > /dev/null
+
+ # If we're compiling Java, we'll need to set up the path.
+ echo "$1" | grep enable-java > /dev/null
+ if test $? -eq 0; then
+ (echo /^CPPFLAGS=/ &&
+ echo "s;\$; $JAVA_FLAGS;" &&
+ echo w &&
+ echo q) | ed Makefile > /dev/null
+ fi
+
+ make > mklog 2>&1 && make ex_access >> mklog 2>&1
+ if test $? -ne 0; then
+ echo "$i: FAILED in make"
+ return 1
+ fi
+
+ (echo a; echo b; echo c) | ./ex_access > /dev/null 2>&1
+ return $?
+}
+
+# Run a test.
+# $1: config flags
+count=0
+r()
+{
+ count=$(expr $count + 1)
+ dir="scr030.$count"
+ (rm -rf $dir && mkdir $dir && cd $dir && config "$1")
+ if test $? -eq 0; then
+ rm -rf $dir
+ else
+ echo "$1: FAILED to build"
+ fi
+}
+
+# Run through all of the standard single options.
+s="\
+--disable-cryptography \
+--disable-hash \
+--disable-largefile \
+--disable-mutexsupport \
+--disable-queue \
+--disable-replication \
+--disable-statistics \
+--disable-verify \
+--enable-compat185 \
+--enable-debug \
+--enable-debug_rop \
+--enable-debug_wop \
+--enable-diagnostic \
+--enable-dump185 \
+--enable-posixmutexes \
+--enable-smallbuild \
+--enable-umrw \
+--with-mutex=UNIX/fcntl \
+--with-mutex=x86/gcc-assembly \
+--with-uniquename=__KEITH__"
+for i in $s; do
+ r "$i --disable-shared"
+done
+
+# Build specific runs of interest.
+r
+r "--disable-static"
+r "--enable-cxx"
+r "--enable-java"
+r "--with-tcl=/usr/local/lib/tcl8.4"
+r "--enable-test --with-tcl=/usr/local/lib/tcl8.4"
+r "--enable-cxx --enable-java --with-tcl=/usr/local/lib/tcl8.4"
diff --git a/db-4.8.30/test/scr031/chk.copy b/db-4.8.30/test/scr031/chk.copy
new file mode 100644
index 0000000..a752c7a
--- /dev/null
+++ b/db-4.8.30/test/scr031/chk.copy
@@ -0,0 +1,46 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check all source files for proper copyright notices.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+# create regex for Copyright notice using current year
+COPYEXP='Copyright.*'`date +%C%y`
+
+(cd $d && find . -name '*.[chys]' -o -name '*.cpp' -o -name '*.tcl' \
+ -o -name '*.java' -o -name '*.cs' -o -name '*.hpp' |
+ xargs egrep -l $COPYEXP) > $t1
+
+# Use sed to remove the files we do not care about; these are the ones
+# from third parties that are included in our distribution.
+
+(cd $d && find . -name '*.[chys]' -o -name '*.cpp' -o -name '*.tcl' \
+ -o -name '*.java' -o -name '*.cs' -o -name '*.hpp') | tee /tmp/o |
+ sed -e '/crypto\//d' \
+ -e '/sha1.c$/d' \
+ -e '/sleepycat\/asm\//d' \
+ -e '/perl\//d' \
+ -e '/mod_db4\//d' \
+ -e '/sqlite\//d' > $t2
+
+
+if diff $t1 $t2 > /dev/null; then
+ exit 0
+else
+ echo "<<< source tree >>> missing copyright notices"
+ diff $t1 $t2 | grep '>' | awk '{print $2}'
+ exit 1
+fi
+
+exit 0
diff --git a/db-4.8.30/test/scr032/chk.rpc b/db-4.8.30/test/scr032/chk.rpc
new file mode 100644
index 0000000..b1d1270
--- /dev/null
+++ b/db-4.8.30/test/scr032/chk.rpc
@@ -0,0 +1,82 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check that the RPC server's handle method lists match the DB source.
+
+r=../../rpc_server/rpc.src
+i=../../dbinc/db.in
+
+t1=__1
+t2=__2
+
+[ -d ../../dbinc ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+
+# $1: handle name
+# $2: handle prefix
+# $3: method file
+check()
+{
+ echo "==== Checking $1/$2..."
+
+ # Build a list of DB_ENV handle methods from the include file.
+ sed -e "/$1 PUBLIC HANDLE LIST BEGIN/,/$1 PUBLIC HANDLE LIST END/p" \
+ -e d < $i |
+ grep '[\* ](\*[a-z]' |
+ sed -e 's/).*$//' \
+ -e 's/.*(\*//' \
+ -e '/^$/d' > $t1
+
+ # Build a list of handle methods from the rpc.src file.
+ egrep '^BEGIN|^LOCAL|^NOFUNC' $r |
+ awk '{print $2}' |
+ egrep "^$2_" |
+ sed -e "/^$2_create/d" \
+ -e "s/$2_//" > $t2
+
+ if cmp -s $t1 $t2 ; then
+ :
+ else
+ echo "FAIL: $1 handle methods do not match."
+ echo "<<< dbinc/db.in >>> rpc_server/rpc.src"
+ diff $t1 $t2
+ exit 1
+ fi
+
+ if [ -z "$3" ]; then
+ return
+ fi
+
+ # Build a list of handle methods from the env/env_method.c and
+ # db/db_method.c files.
+ sed -e "/$1 PUBLIC HANDLE LIST BEGIN/,/$1 PUBLIC HANDLE LIST END/p" \
+ -e d < "$3" |
+ sed -e '/^#ifdef.HAVE_REPLICATION_THREADS/d' \
+ -e '/^#else.*HAVE_REPLICATION_THREADS/,/^#endif/d' \
+ -e '/PUBLIC/d' \
+ -e 's/ = .*//' \
+ -e 's/^.*->//' > $t2
+
+ if cmp -s $t1 $t2 ; then
+ :
+ else
+ echo "FAIL: $1 handle methods do not match."
+ echo "<<< dbinc/db.in >>> $3"
+ diff $t1 $t2
+ exit 1
+ fi
+}
+
+# We don't check the DB handle method limits from db/db_method.c, DB handle
+# methods are set in per-access method routines, they aren't consolidated.
+check DB db
+check DBC dbc
+check DB_ENV env ../../env/env_method.c
+check DB_TXN txn
+
+exit $exitv
diff --git a/db-4.8.30/test/scr033/chk.codegen b/db-4.8.30/test/scr033/chk.codegen
new file mode 100644
index 0000000..1deef0e
--- /dev/null
+++ b/db-4.8.30/test/scr033/chk.codegen
@@ -0,0 +1,43 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure that the db_codegen examples build and compile.
+
+d=../../db_codegen
+
+[ -d $d ] || {
+ echo 'FAIL: cannot find db_codegen source directory.'
+ exit 1
+}
+(cd .. && make db_codegen > /dev/null) || {
+	echo 'FAIL: unable to build db_codegen'
+ exit 1
+}
+
+for i in `find $d -name 'example[0-9]*'` ; do
+ echo " example $i"
+ rm -rf BUILD && mkdir BUILD && cd BUILD
+ if ../../db_codegen -a c -i ../$i; then
+ :
+ else
+ echo "FAIL: failed to load $i"
+ exit 1
+ fi
+ if cc -DBUILD_STANDALONE -pthread \
+ -Wall -Werror -I../.. application.c ../../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: failed to compile $i"
+ exit 1
+ fi
+ if ./t ; then
+ :
+ else
+ echo "FAIL: failed to run $i"
+ exit 1
+ fi
+ cd ..
+done
+
+exit 0
diff --git a/db-4.8.30/test/scr034/chk.mtx b/db-4.8.30/test/scr034/chk.mtx
new file mode 100644
index 0000000..fc093eb
--- /dev/null
+++ b/db-4.8.30/test/scr034/chk.mtx
@@ -0,0 +1,34 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure __mutex_print_id knows about all of the mutex types.
+
+d=../../
+
+[ -d $d/mutex ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+egrep 'case MTX_.*return' $d/mutex/mut_stat.c |
+sed -e 's/.*case //' \
+ -e 's/:.*//' |
+sort > $t1
+
+egrep '#define.MTX_' $d/dbinc/mutex.h |
+sed -e 's/#define.//' \
+ -e 's/ .*//' \
+ -e '/MTX_MAX_ENTRY/d' |
+sort > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< mutex/mut_stat.c >>> dbinc/mutex.h"
+ diff $t1 $t2
+ exit 1
+}
+
+exit 0
diff --git a/db-4.8.30/test/scr035/chk.osdir b/db-4.8.30/test/scr035/chk.osdir
new file mode 100644
index 0000000..36a2f4e
--- /dev/null
+++ b/db-4.8.30/test/scr035/chk.osdir
@@ -0,0 +1,27 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# Check to make sure the @OSDIR@ entries in the Makefile are correct.
+
+d=../../dist
+
+[ -d $d ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+egrep '/@OSDIR@/' $d/Makefile.in | sed -e 's/@.*/.c/' > $t1
+
+(cd $d/../os_windows && ls os_*.c) > $t2
+
+cmp $t1 $t2 || {
+	echo "Makefile @OSDIR@ mismatch with os_windows files"
+	echo "<<< Makefile >>> os_windows"
+	diff $t1 $t2
+ exit 1
+}
+exit 0
diff --git a/db-4.8.30/test/scr037/AllTestData.xml b/db-4.8.30/test/scr037/AllTestData.xml
new file mode 100644
index 0000000..93fdd24
--- /dev/null
+++ b/db-4.8.30/test/scr037/AllTestData.xml
@@ -0,0 +1,796 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<Assembly>
+ <TestFixture name="DatabaseConfigTest">
+ <Test name="TestConfigWithoutEnv">
+ <AutoCommit>True</AutoCommit>
+ <ByteOrder>1234</ByteOrder>
+ <CacheSize>
+ <Bytes>1048576</Bytes>
+ <Gigabytes>0</Gigabytes>
+ <NCaches>1</NCaches>
+ </CacheSize>
+ <DoChecksum>True</DoChecksum>
+ <ErrorPrefix>TestDatabaseConfig</ErrorPrefix>
+ <FreeThreaded>True</FreeThreaded>
+ <NoMMap>True</NoMMap>
+ <NonDurableTxns>True</NonDurableTxns>
+ <PageSize>8196</PageSize>
+ <Priority>HIGH</Priority>
+ <ReadOnly>True</ReadOnly>
+ <ReadUncommitted>True</ReadUncommitted>
+ <Encryption>
+ <password>123</password>
+ <algorithm>AES</algorithm>
+ </Encryption>
+ <Truncate>True</Truncate>
+ <UseEncryption>True</UseEncryption>
+ <UseMVCC>True</UseMVCC>
+ </Test>
+ <Test name="TestConfigWithEnv">
+ <Env>
+ <Create>True</Create>
+ <CreationDir>.//test_DB</CreationDir>
+ <DataDir>.//test_DB</DataDir>
+ <ErrorPrefix>TestDatabaseConfigWithEnv</ErrorPrefix>
+ </Env>
+ </Test>
+ </TestFixture>
+ <TestFixture name="BTreeDatabaseConfigTest">
+ <Test name="TestConfigWithoutEnv">
+ <AutoCommit>False</AutoCommit>
+ <ByteOrder>1234</ByteOrder>
+ <CacheSize>
+ <Bytes>1048576</Bytes>
+ <Gigabytes>0</Gigabytes>
+ <NCaches>1</NCaches>
+ </CacheSize>
+ <Creation>ALWAYS</Creation>
+ <DoChecksum>False</DoChecksum>
+ <Duplicates>UNSORTED</Duplicates>
+ <Encryption>
+ <password>123</password>
+ <algorithm>AES</algorithm>
+ </Encryption>
+ <ErrorPrefix>TestDatabaseConfig</ErrorPrefix>
+ <FreeThreaded>True</FreeThreaded>
+ <MinKeysPerPage>64</MinKeysPerPage>
+ <NoMMap>False</NoMMap>
+ <NonDurableTxns>False</NonDurableTxns>
+ <NoReverseSplitting>True</NoReverseSplitting>
+ <PageSize>8192</PageSize>
+ <Priority>HIGH</Priority>
+ <ReadOnly>False</ReadOnly>
+ <ReadUncommitted>False</ReadUncommitted>
+ <Truncate>False</Truncate>
+ <UseEncryption>True</UseEncryption>
+ <UseMVCC>False</UseMVCC>
+ <UseRecordNumbers>False</UseRecordNumbers>
+ </Test>
+ </TestFixture>
+ <TestFixture name="BTreeDatabaseTest">
+ <Test name="TestOpenNewBtreeDB">
+ <AutoCommit>False</AutoCommit>
+ <ByteOrder>1234</ByteOrder>
+ <CacheSize>
+ <Bytes>1048576</Bytes>
+ <Gigabytes>0</Gigabytes>
+ <NCaches>1</NCaches>
+ </CacheSize>
+ <Creation>ALWAYS</Creation>
+ <DoChecksum>False</DoChecksum>
+ <Duplicates>UNSORTED</Duplicates>
+ <ErrorPrefix>TestDatabaseConfig</ErrorPrefix>
+ <FreeThreaded>True</FreeThreaded>
+ <HasMultiple>True</HasMultiple>
+ <MinKeysPerPage>64</MinKeysPerPage>
+ <NoMMap>False</NoMMap>
+ <NonDurableTxns>False</NonDurableTxns>
+ <NoReverseSplitting>True</NoReverseSplitting>
+ <PageSize>8192</PageSize>
+ <Priority>HIGH</Priority>
+ <ReadOnly>False</ReadOnly>
+ <ReadUncommitted>False</ReadUncommitted>
+ <Encryption>
+ <password>123</password>
+ <algorithm>AES</algorithm>
+ </Encryption>
+ <Truncate>False</Truncate>
+ <UseEncryption>True</UseEncryption>
+ <UseMVCC>False</UseMVCC>
+ <UseRecordNumbers>False</UseRecordNumbers>
+ </Test>
+ </TestFixture>
+ <TestFixture name="HashDatabaseConfigTest">
+ <Test name="TestConfigWithoutEnv">
+ <AutoCommit>False</AutoCommit>
+ <ByteOrder>1234</ByteOrder>
+ <CacheSize>
+ <Bytes>1048576</Bytes>
+ <Gigabytes>0</Gigabytes>
+ <NCaches>1</NCaches>
+ </CacheSize>
+ <Creation>ALWAYS</Creation>
+ <DoChecksum>False</DoChecksum>
+ <Duplicates>UNSORTED</Duplicates>
+ <ErrorPrefix>TestConfigWithoutEnv</ErrorPrefix>
+ <FillFactor>7</FillFactor>
+ <FreeThreaded>False</FreeThreaded>
+ <NoMMap>True</NoMMap>
+ <NonDurableTxns>True</NonDurableTxns>
+ <NumElements>100</NumElements>
+ <PageSize>8192</PageSize>
+ <Priority>HIGH</Priority>
+ <ReadOnly>False</ReadOnly>
+ <ReadUncommitted>False</ReadUncommitted>
+ <Encryption>
+ <password>123</password>
+ <algorithm>AES</algorithm>
+ </Encryption>
+ <Truncate>False</Truncate>
+ <UseEncryption>True</UseEncryption>
+ <UseMVCC>False</UseMVCC>
+ </Test>
+ </TestFixture>
+ <TestFixture name="HashDatabaseTest">
+ <Test name="TestOpenNewHashDB">
+ <AutoCommit>False</AutoCommit>
+ <ByteOrder>1234</ByteOrder>
+ <CacheSize>
+ <Bytes>1048576</Bytes>
+ <Gigabytes>0</Gigabytes>
+ <NCaches>1</NCaches>
+ </CacheSize>
+ <Creation>ALWAYS</Creation>
+ <DoChecksum>False</DoChecksum>
+ <Duplicates>UNSORTED</Duplicates>
+ <ErrorPrefix>TestDatabaseConfig</ErrorPrefix>
+ <FillFactor>2</FillFactor>
+ <FreeThreaded>True</FreeThreaded>
+ <HasMultiple>False</HasMultiple>
+ <NoMMap>False</NoMMap>
+ <NonDurableTxns>False</NonDurableTxns>
+ <NumElements>10</NumElements>
+ <PageSize>8192</PageSize>
+ <Priority>HIGH</Priority>
+ <ReadOnly>False</ReadOnly>
+ <ReadUncommitted>False</ReadUncommitted>
+ <Encryption>
+ <password>123</password>
+ <algorithm>AES</algorithm>
+ </Encryption>
+ <Truncate>False</Truncate>
+ <UseMVCC>False</UseMVCC>
+ </Test>
+ </TestFixture>
+ <TestFixture name="RecnoDatabaseConfigTest">
+ <Test name="TestConfigWithoutEnv">
+ <AutoCommit>False</AutoCommit>
+ <BackingFile>../backing.log</BackingFile>
+ <ByteOrder>1234</ByteOrder>
+ <CacheSize>
+ <Bytes>1048576</Bytes>
+ <Gigabytes>0</Gigabytes>
+ <NCaches>1</NCaches>
+ </CacheSize>
+ <ConsumeInOrder>True</ConsumeInOrder>
+ <Creation>IF_NEEDED</Creation>
+ <Delimiter>10</Delimiter>
+ <DoChecksum>True</DoChecksum>
+ <Duplicates>UNSORTED</Duplicates>
+ <ErrorPrefix>TestDatabaseConfig</ErrorPrefix>
+ <ExtentSize>2</ExtentSize>
+ <FreeThreaded>True</FreeThreaded>
+ <Length>100</Length>
+ <PadByte>10</PadByte>
+ <NoMMap>False</NoMMap>
+ <NonDurableTxns>True</NonDurableTxns>
+ <PageSize>8192</PageSize>
+ <Priority>HIGH</Priority>
+ <ReadOnly>True</ReadOnly>
+ <ReadUncommitted>True</ReadUncommitted>
+ <Renumber>True</Renumber>
+ <Encryption>
+ <password>123</password>
+ <algorithm>AES</algorithm>
+ </Encryption>
+ <Snapshot>True</Snapshot>
+ <Truncate>True</Truncate>
+ <UseEncryption>True</UseEncryption>
+ <UseMVCC>True</UseMVCC>
+ </Test>
+ </TestFixture>
+ <TestFixture name="QueueDatabaseConfigTest">
+ <Test name="TestConfigWithoutEnv">
+ <AutoCommit>False</AutoCommit>
+ <ByteOrder>1234</ByteOrder>
+ <CacheSize>
+ <Bytes>1048576</Bytes>
+ <Gigabytes>0</Gigabytes>
+ <NCaches>1</NCaches>
+ </CacheSize>
+ <ConsumeInOrder>True</ConsumeInOrder>
+ <Creation>ALWAYS</Creation>
+ <DoChecksum>False</DoChecksum>
+ <Duplicates>UNSORTED</Duplicates>
+ <ErrorPrefix>TestDatabaseConfig</ErrorPrefix>
+ <ExtentSize>2</ExtentSize>
+ <FreeThreaded>True</FreeThreaded>
+ <Length>20</Length>
+ <PadByte>10</PadByte>
+ <NoMMap>False</NoMMap>
+ <NonDurableTxns>False</NonDurableTxns>
+ <PageSize>40960</PageSize>
+ <Priority>HIGH</Priority>
+ <ReadOnly>False</ReadOnly>
+ <ReadUncommitted>False</ReadUncommitted>
+ <Encryption>
+ <password>123</password>
+ <algorithm>AES</algorithm>
+ </Encryption>
+ <Truncate>False</Truncate>
+ <UseEncryption>True</UseEncryption>
+ <UseMVCC>False</UseMVCC>
+ </Test>
+ </TestFixture>
+ <TestFixture name="QueueDatabaseTest">
+ <Test name="TestOpenNewQueueDB">
+ <AutoCommit>False</AutoCommit>
+ <ByteOrder>1234</ByteOrder>
+ <CacheSize>
+ <Bytes>1048576</Bytes>
+ <Gigabytes>0</Gigabytes>
+ <NCaches>1</NCaches>
+ </CacheSize>
+ <ConsumeInOrder>True</ConsumeInOrder>
+ <Creation>ALWAYS</Creation>
+ <DoChecksum>False</DoChecksum>
+ <Duplicates>UNSORTED</Duplicates>
+ <ErrorPrefix>TestDatabaseConfig</ErrorPrefix>
+ <ExtentSize>2</ExtentSize>
+ <FreeThreaded>True</FreeThreaded>
+ <HasMultiple>False</HasMultiple>
+ <Length>100</Length>
+ <PadByte>10</PadByte>
+ <NoMMap>False</NoMMap>
+ <NonDurableTxns>False</NonDurableTxns>
+ <PageSize>8192</PageSize>
+ <Priority>HIGH</Priority>
+ <ReadOnly>False</ReadOnly>
+ <ReadUncommitted>False</ReadUncommitted>
+ <Encryption>
+ <password>123</password>
+ <algorithm>AES</algorithm>
+ </Encryption>
+ <Truncate>False</Truncate>
+ <UseEncryption>True</UseEncryption>
+ <UseMVCC>False</UseMVCC>
+ </Test>
+ </TestFixture>
+ <TestFixture name="RecnoDatabaseTest">
+ <Test name="TestOpenNewRecnoDB">
+ <AutoCommit>False</AutoCommit>
+ <ByteOrder>1234</ByteOrder>
+ <CacheSize>
+ <Bytes>1048576</Bytes>
+ <Gigabytes>0</Gigabytes>
+ <NCaches>1</NCaches>
+ </CacheSize>
+ <Creation>ALWAYS</Creation>
+ <Delimiter>100</Delimiter>
+ <DoChecksum>False</DoChecksum>
+ <Duplicates>UNSORTED</Duplicates>
+ <ErrorPrefix>TestDatabaseConfig</ErrorPrefix>
+ <FreeThreaded>True</FreeThreaded>
+ <HasMultiple>True</HasMultiple>
+ <Length>100</Length>
+ <NoMMap>False</NoMMap>
+ <NonDurableTxns>False</NonDurableTxns>
+ <NoReverseSplitting>True</NoReverseSplitting>
+ <PadByte>100</PadByte>
+ <PageSize>8192</PageSize>
+ <Priority>HIGH</Priority>
+ <ReadOnly>False</ReadOnly>
+ <ReadUncommitted>False</ReadUncommitted>
+ <Renumber>False</Renumber>
+ <Encryption>
+ <password>123</password>
+ <algorithm>AES</algorithm>
+ </Encryption>
+ <Snapshot>False</Snapshot>
+ <Truncate>False</Truncate>
+ <UseEncryption>True</UseEncryption>
+ <UseMVCC>False</UseMVCC>
+ <UseRecordNumbers>False</UseRecordNumbers>
+ </Test>
+ </TestFixture>
+ <TestFixture name="LockingConfigTest">
+ <Test name="TestConfig">
+ <Conflicts>
+ <E>
+ <E>1</E>
+ <E>1</E>
+ <E>1</E>
+ </E>
+ <E>
+ <E>1</E>
+ <E>1</E>
+ <E>1</E>
+ </E>
+ <E>
+ <E>1</E>
+ <E>1</E>
+ <E>1</E>
+ </E>
+ </Conflicts>
+ <DeadlockResolution>MAX_LOCKS</DeadlockResolution>
+ <MaxLocks>10</MaxLocks>
+ <MaxLockers>10</MaxLockers>
+ <MaxObjects>10</MaxObjects>
+ <Partitions>3</Partitions>
+ </Test>
+ </TestFixture>
+ <TestFixture name="LogConfigTest">
+ <Test name="TestConfig">
+ <AutoRemove>True</AutoRemove>
+ <BufferSize>4096</BufferSize>
+ <Dir>./</Dir>
+ <FileMode>755</FileMode>
+ <ForceSync>True</ForceSync>
+ <InMemory>True</InMemory>
+ <MaxFileSize>1048576</MaxFileSize>
+ <NoBuffer>True</NoBuffer>
+ <RegionSize>30720</RegionSize>
+ <ZeroOnCreate>True</ZeroOnCreate>
+ </Test>
+ </TestFixture>
+ <TestFixture name="MutexConfigTest">
+ <Test name="TestConfig">
+ <Alignment>256</Alignment>
+ <Increment>10</Increment>
+ <MaxMutexes>50</MaxMutexes>
+ <NumTestAndSetSpins>5</NumTestAndSetSpins>
+ </Test>
+ </TestFixture>
+ <TestFixture name="TransactionConfigTest">
+ <Test name="TestConfig">
+ <IsolationDegree>2</IsolationDegree>
+ <NoWait>True</NoWait>
+ <Snapshot>True</Snapshot>
+ <SyncAction>WRITE_NOSYNC</SyncAction>
+ </Test>
+ </TestFixture>
+ <TestFixture name="MPoolConfigTest">
+ <Test name="TestConfig">
+ <CacheSize>
+ <Bytes>1048576</Bytes>
+ <Gigabytes>0</Gigabytes>
+ <NCaches>1</NCaches>
+ </CacheSize>
+ <MaxCacheSize>
+ <Bytes>10485760</Bytes>
+ <Gigabytes>0</Gigabytes>
+ <NCaches>1</NCaches>
+ </MaxCacheSize>
+ <MaxOpenFiles>10</MaxOpenFiles>
+ <MaxSequentialWrites>
+ <maxWrites>10</maxWrites>
+ <pause>1000</pause>
+ </MaxSequentialWrites>
+ <MMapSize>1048576</MMapSize>
+ </Test>
+ </TestFixture>
+ <TestFixture name="SecondaryDatabaseConfigTest">
+ <Test name="TestConfig">
+ <ImmutableKey>True</ImmutableKey>
+ <Populate>True</Populate>
+ </Test>
+ </TestFixture>
+ <TestFixture name="SecondaryBTreeDatabaseConfigTest">
+ <Test name="TestConfig">
+ <Creation>NEVER</Creation>
+ <Duplicates>SORTED</Duplicates>
+ <NoReverseSplitting>True</NoReverseSplitting>
+ <UseRecordNumbers>True</UseRecordNumbers>
+ <MinKeysPerPage>100</MinKeysPerPage>
+ <ImmutableKey>True</ImmutableKey>
+ <Populate>True</Populate>
+ </Test>
+ </TestFixture>
+ <TestFixture name="SecondaryBTreeDatabaseTest">
+ <Test name="TestOpen">
+ <ImmutableKey>False</ImmutableKey>
+ <Populate>True</Populate>
+ <Creation>ALWAYS</Creation>
+ <Duplicates>SORTED</Duplicates>
+ <NoReverseSplitting>True</NoReverseSplitting>
+ <UseRecordNumbers>False</UseRecordNumbers>
+ <MinKeysPerPage>64</MinKeysPerPage>
+ </Test>
+ </TestFixture>
+ <TestFixture name="SecondaryHashDatabaseConfigTest">
+ <Test name="TestConfig">
+ <ImmutableKey>True</ImmutableKey>
+ <Populate>True</Populate>
+ <Creation>NEVER</Creation>
+ <Duplicates>SORTED</Duplicates>
+ <FillFactor>5</FillFactor>
+ <NumElements>100</NumElements>
+ </Test>
+ </TestFixture>
+ <TestFixture name="SecondaryHashDatabaseTest">
+ <Test name="TestOpen">
+ <ImmutableKey>True</ImmutableKey>
+ <Populate>True</Populate>
+ <Creation>ALWAYS</Creation>
+ <Duplicates>SORTED</Duplicates>
+ <FillFactor>5</FillFactor>
+ <NumElements>100</NumElements>
+ </Test>
+ </TestFixture>
+ <TestFixture name="SecondaryQueueDatabaseConfigTest">
+ <Test name="TestConfig">
+ <ImmutableKey>True</ImmutableKey>
+ <Populate>True</Populate>
+ <Creation>NEVER</Creation>
+ <Length>100</Length>
+ <PadByte>10</PadByte>
+ <ExtentSize>4</ExtentSize>
+ </Test>
+ </TestFixture>
+ <TestFixture name="SecondaryQueueDatabaseTest">
+ <Test name="TestOpen">
+ <ImmutableKey>True</ImmutableKey>
+ <Populate>True</Populate>
+ <Creation>ALWAYS</Creation>
+ <Length>100</Length>
+ <PadByte>10</PadByte>
+ <ExtentSize>4</ExtentSize>
+ </Test>
+ </TestFixture>
+ <TestFixture name="SecondaryRecnoDatabaseConfigTest">
+ <Test name="TestConfig">
+ <ImmutableKey>True</ImmutableKey>
+ <Populate>True</Populate>
+ <BackingFile>.//backing</BackingFile>
+ <Creation>NEVER</Creation>
+ <Delimiter>100</Delimiter>
+ <Length>100</Length>
+ <PadByte>100</PadByte>
+ <Renumber>False</Renumber>
+ <Snapshot>False</Snapshot>
+ </Test>
+ </TestFixture>
+ <TestFixture name="SecondaryRecnoDatabaseTest">
+ <Test name="TestOpen">
+ <ImmutableKey>True</ImmutableKey>
+ <Populate>True</Populate>
+ <Creation>ALWAYS</Creation>
+ <Delimiter>100</Delimiter>
+ <Length>120</Length>
+ <PadByte>50</PadByte>
+ <Renumber>False</Renumber>
+ <Snapshot>False</Snapshot>
+ </Test>
+ </TestFixture>
+ <TestFixture name="SequenceConfigTest">
+ <Test name="TestConfig">
+ <CacheSize>1048576</CacheSize>
+ <Creation>ALWAYS</Creation>
+ <Decrement>True</Decrement>
+ <FreeThreaded>True</FreeThreaded>
+ <Increment>True</Increment>
+ <InitialValue>10</InitialValue>
+ <Max>1000</Max>
+ <Min>17</Min>
+ <Wrap>True</Wrap>
+ </Test>
+ </TestFixture>
+ <TestFixture name="SequenceTest">
+ <Test name="TestConfig">
+ <CacheSize>1048576</CacheSize>
+ <Creation>ALWAYS</Creation>
+ <Decrement>True</Decrement>
+ <FreeThreaded>True</FreeThreaded>
+ <Increment>False</Increment>
+ <InitialValue>10</InitialValue>
+ <Wrap>True</Wrap>
+ </Test>
+ </TestFixture>
+ <TestFixture name="DatabaseEnvironmentConfigTest">
+ <Test name="TestConfig">
+ <AutoCommit>True</AutoCommit>
+ <CDB_ALLDB>True</CDB_ALLDB>
+ <Create>True</Create>
+ <CreationDir>./</CreationDir>
+ <DataDirs>
+ <a>./</a>
+ <a>./TestDbEnv</a>
+ </DataDirs>
+ <ErrorPrefix>TestConfig</ErrorPrefix>
+ <ForceFlush>True</ForceFlush>
+ <FreeThreaded>True</FreeThreaded>
+ <MaxTransactions>10</MaxTransactions>
+ <InitRegions>True</InitRegions>
+ <IntermediateDirMode>rwx------</IntermediateDirMode>
+ <Lockdown>True</Lockdown>
+ <LockTimeout>1000</LockTimeout>
+ <NoBuffer>True</NoBuffer>
+ <NoLocking>True</NoLocking>
+ <NoMMap>True</NoMMap>
+ <NoPanic>True</NoPanic>
+ <Overwrite>True</Overwrite>
+ <Private>True</Private>
+ <Register>True</Register>
+ <RunFatalRecovery>True</RunFatalRecovery>
+ <RunRecovery>True</RunRecovery>
+ <SystemMemory>True</SystemMemory>
+ <TempDir>.//test_DB</TempDir>
+ <TimeNotGranted>True</TimeNotGranted>
+ <TxnNoSync>True</TxnNoSync>
+ <TxnNoWait>True</TxnNoWait>
+ <TxnSnapshot>True</TxnSnapshot>
+ <TxnTimestamp>2008-12-09</TxnTimestamp>
+ <TxnWriteNoSync>True</TxnWriteNoSync>
+ <UseCDB>True</UseCDB>
+ <UseLocking>True</UseLocking>
+ <UseLogging>True</UseLogging>
+ <UseMPool>True</UseMPool>
+ <UseMVCC>True</UseMVCC>
+ <UseReplication>True</UseReplication>
+ <UseTxns>True</UseTxns>
+ <Verbosity>
+ <AllFileOps>True</AllFileOps>
+ <Deadlock>True</Deadlock>
+ <FileOps>True</FileOps>
+ <Recovery>True</Recovery>
+ <Register>True</Register>
+ <Replication>True</Replication>
+ <ReplicationElection>True</ReplicationElection>
+ <ReplicationLease>True</ReplicationLease>
+ <ReplicationMessages>True</ReplicationMessages>
+ <ReplicationMisc>True</ReplicationMisc>
+ <ReplicationSync>True</ReplicationSync>
+ <RepMgrConnectionFailure>True</RepMgrConnectionFailure>
+ <RepMgrMisc>True</RepMgrMisc>
+ <WaitsForTable>True</WaitsForTable>
+ </Verbosity>
+ <YieldCPU>True</YieldCPU>
+ </Test>
+ <Test name="TestConfigLock">
+ <Conflicts>
+ <E>
+ <E>10</E>
+ <E>20</E>
+ </E>
+ <E>
+ <E>30</E>
+ <E>40</E>
+ </E>
+ </Conflicts>
+ <DeadlockResolution>MAX_LOCKS</DeadlockResolution>
+ <MaxLocks>50</MaxLocks>
+ <MaxLockers>60</MaxLockers>
+ <MaxObjects>70</MaxObjects>
+ <Partitions>3</Partitions>
+ </Test>
+ <Test name="TestConfigLog">
+ <AutoRemove>True</AutoRemove>
+ <BufferSize>10240</BufferSize>
+ <Dir>./</Dir>
+ <FileMode>755</FileMode>
+ <ForceSync>True</ForceSync>
+ <InMemory>True</InMemory>
+ <MaxFileSize>1048576</MaxFileSize>
+ <NoBuffer>True</NoBuffer>
+ <RegionSize>20480</RegionSize>
+ <ZeroOnCreate>True</ZeroOnCreate>
+ </Test>
+ <Test name="TestConfigMutex">
+ <Alignment>512</Alignment>
+ <Increment>128</Increment>
+ <MaxMutexes>15</MaxMutexes>
+ <NumTestAndSetSpins>10</NumTestAndSetSpins>
+ </Test>
+ <Test name="TestConfigReplication">
+ <AckTimeout>2000</AckTimeout>
+ <BulkTransfer>True</BulkTransfer>
+ <CheckpointDelay>100</CheckpointDelay>
+ <ConnectionRetry>100</ConnectionRetry>
+ <DelayClientSync>True</DelayClientSync>
+ <ElectionRetry>15</ElectionRetry>
+ <ElectionTimeout>1050</ElectionTimeout>
+ <FullElectionTimeout>5000</FullElectionTimeout>
+ <HeartbeatMonitor>100</HeartbeatMonitor>
+ <HeartbeatSend>50</HeartbeatSend>
+ <LeaseTimeout>1025</LeaseTimeout>
+ <NoAutoInit>True</NoAutoInit>
+ <NoBlocking>True</NoBlocking>
+ <NSites>7</NSites>
+ <Priority>1</Priority>
+ <RepMgrAckPolicy>ALL</RepMgrAckPolicy>
+ <RepMgrLocalSite>
+ <Host>127.0.0.0</Host>
+ <Port>11111</Port>
+ </RepMgrLocalSite>
+ <Strict2Site>True</Strict2Site>
+ <UseMasterLeases>True</UseMasterLeases>
+ </Test>
+ </TestFixture>
+ <TestFixture name="DatabaseEnvironmentTest">
+ <Test name="TestConfigAll">
+ <AutoCommit>True</AutoCommit>
+ <CDB_ALLDB>False</CDB_ALLDB>
+ <Create>True</Create>
+ <CreationDir>./</CreationDir>
+ <DataDirs>
+ <a>./</a>
+ <a>./TestDbEnv</a>
+ </DataDirs>
+ <ErrorPrefix>TestConfigAll</ErrorPrefix>
+ <ForceFlush>False</ForceFlush>
+ <FreeThreaded>True</FreeThreaded>
+ <MaxTransactions>10</MaxTransactions>
+ <InitRegions>True</InitRegions>
+ <IntermediateDirMode>rwx------</IntermediateDirMode>
+ <Lockdown>False</Lockdown>
+ <LockTimeout>1000</LockTimeout>
+ <NoBuffer>False</NoBuffer>
+ <NoLocking>False</NoLocking>
+ <NoMMap>False</NoMMap>
+ <NoPanic>False</NoPanic>
+ <Overwrite>True</Overwrite>
+ <Private>False</Private>
+ <Register>False</Register>
+ <RunFatalRecovery>False</RunFatalRecovery>
+ <RunRecovery>False</RunRecovery>
+ <SystemMemory>False</SystemMemory>
+ <TempDir>.//test_DB</TempDir>
+ <TimeNotGranted>True</TimeNotGranted>
+ <TxnNoSync>False</TxnNoSync>
+ <TxnNoWait>False</TxnNoWait>
+ <TxnSnapshot>False</TxnSnapshot>
+ <TxnTimestamp>2008-12-09</TxnTimestamp>
+ <TxnWriteNoSync>False</TxnWriteNoSync>
+ <UseCDB>False</UseCDB>
+ <UseLocking>True</UseLocking>
+ <UseLogging>True</UseLogging>
+ <UseMPool>True</UseMPool>
+ <UseMVCC>True</UseMVCC>
+ <UseReplication>True</UseReplication>
+ <UseTxns>True</UseTxns>
+ <Verbosity>
+ <AllFileOps>True</AllFileOps>
+ <Deadlock>True</Deadlock>
+ <FileOps>True</FileOps>
+ <Recovery>False</Recovery>
+ <Register>False</Register>
+ <Replication>True</Replication>
+ <ReplicationElection>True</ReplicationElection>
+ <ReplicationLease>True</ReplicationLease>
+ <ReplicationMessages>True</ReplicationMessages>
+ <ReplicationMisc>True</ReplicationMisc>
+ <ReplicationSync>True</ReplicationSync>
+ <RepMgrConnectionFailure>True</RepMgrConnectionFailure>
+ <RepMgrMisc>True</RepMgrMisc>
+ <WaitsForTable>False</WaitsForTable>
+ </Verbosity>
+ <YieldCPU>True</YieldCPU>
+
+ <LockingConfig>
+ <Conflicts>
+ <E>
+ <E>2</E>
+ <E>1</E>
+ </E>
+ <E>
+ <E>1</E>
+ <E>2</E>
+ </E>
+ </Conflicts>
+ <DeadlockResolution>MAX_LOCKS</DeadlockResolution>
+ <MaxLocks>50</MaxLocks>
+ <MaxLockers>10</MaxLockers>
+ <MaxObjects>60</MaxObjects>
+ <Partitions>10</Partitions>
+ </LockingConfig>
+
+ <LogConfig>
+ <AutoRemove>False</AutoRemove>
+ <BufferSize>10240</BufferSize>
+ <Dir>./</Dir>
+ <FileMode>755</FileMode>
+ <ForceSync>True</ForceSync>
+ <InMemory>False</InMemory>
+ <MaxFileSize>1048576</MaxFileSize>
+ <NoBuffer>False</NoBuffer>
+ <RegionSize>204800</RegionSize>
+ <ZeroOnCreate>True</ZeroOnCreate>
+ </LogConfig>
+
+ <MutexConfig>
+ <Alignment>512</Alignment>
+ <Increment>128</Increment>
+ <MaxMutexes>10000</MaxMutexes>
+ <NumTestAndSetSpins>10</NumTestAndSetSpins>
+ </MutexConfig>
+
+ <MPoolConfig>
+ <CacheSize>
+ <Bytes>10485760</Bytes>
+ <Gigabytes>0</Gigabytes>
+ <NCaches>2</NCaches>
+ </CacheSize>
+ <MaxCacheSize>
+ <Bytes>1048576</Bytes>
+ <Gigabytes>0</Gigabytes>
+ <NCaches>0</NCaches>
+ </MaxCacheSize>
+ <MaxOpenFiles>5</MaxOpenFiles>
+ <MaxSequentialWrites>
+ <maxWrites>1</maxWrites>
+ <pause>1000</pause>
+ </MaxSequentialWrites>
+ <MMapSize>1048576</MMapSize>
+ </MPoolConfig>
+
+ <ReplicationConfig>
+ <AckTimeout>2000</AckTimeout>
+ <BulkTransfer>True</BulkTransfer>
+ <CheckpointDelay>100</CheckpointDelay>
+ <ConnectionRetry>100</ConnectionRetry>
+ <DelayClientSync>True</DelayClientSync>
+ <ElectionRetry>15</ElectionRetry>
+ <ElectionTimeout>1050</ElectionTimeout>
+ <FullElectionTimeout>5000</FullElectionTimeout>
+ <HeartbeatMonitor>100</HeartbeatMonitor>
+ <HeartbeatSend>50</HeartbeatSend>
+ <LeaseTimeout>1025</LeaseTimeout>
+ <NoAutoInit>True</NoAutoInit>
+ <NoBlocking>True</NoBlocking>
+ <NSites>7</NSites>
+ <Priority>1</Priority>
+ <RepMgrAckPolicy>ALL</RepMgrAckPolicy>
+ <RepMgrLocalSite>
+ <Host>127.0.0.0</Host>
+ <Port>11111</Port>
+ </RepMgrLocalSite>
+ <Strict2Site>True</Strict2Site>
+ <UseMasterLeases>True</UseMasterLeases>
+ </ReplicationConfig>
+ </Test>
+ </TestFixture>
+ <TestFixture name="CursorConfigTest">
+ <Test name="TestConfig">
+ <IsolationDegree>2</IsolationDegree>
+ <Priority>VERY_HIGH</Priority>
+ <SnapshotIsolation>True</SnapshotIsolation>
+ <WriteCursor>True</WriteCursor>
+ </Test>
+ </TestFixture>
+ <TestFixture name="ReplicationConfigTest">
+ <Test name="TestConfig">
+ <AckTimeout>1000</AckTimeout>
+ <BulkTransfer>True</BulkTransfer>
+ <CheckpointDelay>101</CheckpointDelay>
+ <ConnectionRetry>102</ConnectionRetry>
+ <DelayClientSync>True</DelayClientSync>
+ <ElectionRetry>15</ElectionRetry>
+ <ElectionTimeout>1050</ElectionTimeout>
+ <FullElectionTimeout>5000</FullElectionTimeout>
+ <HeartbeatMonitor>100</HeartbeatMonitor>
+ <HeartbeatSend>50</HeartbeatSend>
+ <LeaseTimeout>1025</LeaseTimeout>
+ <NoAutoInit>True</NoAutoInit>
+ <NoBlocking>True</NoBlocking>
+ <NSites>7</NSites>
+ <Priority>1</Priority>
+ <RepMgrAckPolicy>ALL</RepMgrAckPolicy>
+ <RepMgrLocalSite>
+ <Host>127.0.0.0</Host>
+ <Port>11111</Port>
+ </RepMgrLocalSite>
+ <Strict2Site>True</Strict2Site>
+ <UseMasterLeases>True</UseMasterLeases>
+ </Test>
+ </TestFixture>
+</Assembly>
\ No newline at end of file
diff --git a/db-4.8.30/test/scr037/BTreeCursorTest.cs b/db-4.8.30/test/scr037/BTreeCursorTest.cs
new file mode 100644
index 0000000..0fb6918
--- /dev/null
+++ b/db-4.8.30/test/scr037/BTreeCursorTest.cs
@@ -0,0 +1,1192 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class BTreeCursorTest
+ {
+ private string testFixtureName;
+ private string testFixtureHome;
+ private string testName;
+ private string testHome;
+
+ private DatabaseEnvironment paramEnv;
+ private BTreeDatabase paramDB;
+ private EventWaitHandle signal;
+
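+ // Cursor-movement callback invoked by RdMfWt() in the read-modify-write tests.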
+ private delegate void BTCursorMoveFuncDelegate(
+ BTreeCursor cursor, LockingInfo lockingInfo);
+ private BTCursorMoveFuncDelegate btCursorFunc;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "BTreeCursorTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+ }
+
+ [Test]
+ public void TestAddKeyFirst()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+
+ testName = "TestAddKeyFirst";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Add record("key", "data") into the database.
+ CursorTest.GetCursorInBtreeDBWithoutEnv(
+ testHome, testName, out db, out cursor);
+ CursorTest.AddOneByCursor(db, cursor);
+
+ // Add record("key","data1") as the first of the data item of "key".
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data1")));
+ cursor.Add(pair, Cursor.InsertLocation.FIRST);
+
+ // Confirm the record was added as the first data item for "key".
+ cursor.Move(new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")), true);
+ Assert.AreEqual(ASCIIEncoding.ASCII.GetBytes("data1"),
+ cursor.Current.Value.Data);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestAddKeyLast()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+
+ testName = "TestAddKeyLast";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Add record("key", "data") into the database.
+ CursorTest.GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out cursor);
+ CursorTest.AddOneByCursor(db, cursor);
+
+ // Add new record("key", "data1") as the last data item for "key".
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data1")));
+ cursor.Add(pair, Cursor.InsertLocation.LAST);
+
+ // Confirm the new record is not the first data item for "key", since it was added last.
+ cursor.Move(new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")), true);
+ Assert.AreNotEqual(ASCIIEncoding.ASCII.GetBytes("data1"),
+ cursor.Current.Value.Data);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestAddUnique()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+
+ testName = "TestAddUnique";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Open a database and cursor.
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+
+ // To reject duplicate key/data pairs, the database must be configured with sorted duplicates.
+ dbConfig.Duplicates = DuplicatesPolicy.SORTED;
+ db = BTreeDatabase.Open(testHome + "/" + testName + ".db", dbConfig);
+ cursor = db.Cursor();
+
+ // Add record("key", "data") into the database.
+ CursorTest.AddOneByCursor(db, cursor);
+
+ // Adding the duplicate record("key", "data") should fail.
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data")));
+ try
+ {
+ cursor.AddUnique(pair);
+ }
+ catch (KeyExistException)
+ {
+ }
+ finally
+ {
+ cursor.Close();
+ db.Close();
+ }
+ }
+
+ [Test]
+ public void TestDuplicateWithSamePos()
+ {
+ BTreeDatabase db;
+ BTreeDatabaseConfig dbConfig;
+ BTreeCursor cursor, dupCursor;
+ DatabaseEnvironment env;
+ DatabaseEnvironmentConfig envConfig;
+ DatabaseEntry key, data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ Transaction txn;
+
+ testName = "TestDuplicateWithSamePos";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ envConfig = new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ envConfig.NoMMap = false;
+ env = DatabaseEnvironment.Open(testHome, envConfig);
+
+ txn = env.BeginTransaction();
+ dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ db = BTreeDatabase.Open(testName + ".db", dbConfig, txn);
+ key = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key"));
+ data = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data"));
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data);
+ db.Put(key, data, txn);
+ txn.Commit();
+
+ txn = env.BeginTransaction();
+ cursor = db.Cursor(txn);
+ cursor.Move(key, true);
+
+ // Duplicate the cursor, keeping its position.
+ dupCursor = cursor.Duplicate(true);
+
+ // Overwrite the record.
+ dupCursor.Overwrite(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("newdata")));
+
+ // Confirm that the original data doesn't exist.
+ Assert.IsFalse(dupCursor.Move(pair, true));
+
+ dupCursor.Close();
+ cursor.Close();
+ txn.Commit();
+ db.Close();
+ env.Close();
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestDuplicateToDifferentPos()
+ {
+ BTreeDatabase db;
+ BTreeDatabaseConfig dbConfig;
+ BTreeCursor cursor, dupCursor;
+ DatabaseEnvironment env;
+ DatabaseEnvironmentConfig envConfig;
+ DatabaseEntry key, data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ Transaction txn;
+
+ testName = "TestDuplicateToDifferentPos";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ envConfig = new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ envConfig.NoMMap = false;
+ env = DatabaseEnvironment.Open(testHome, envConfig);
+
+ txn = env.BeginTransaction();
+ dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ db = BTreeDatabase.Open(testName + ".db", dbConfig, txn);
+ key = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key"));
+ data = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data"));
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data);
+ db.Put(key, data, txn);
+ txn.Commit();
+
+ txn = env.BeginTransaction();
+ cursor = db.Cursor(txn);
+ cursor.Move(key, true);
+
+ // Duplicate the cursor without keeping its position.
+ dupCursor = cursor.Duplicate(false);
+
+ /*
+ * The duplicate cursor points to nothing so overwriting the
+ * record is not allowed.
+ */
+ try
+ {
+ dupCursor.Overwrite(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("newdata")));
+ }
+ catch (DatabaseException)
+ {
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ dupCursor.Close();
+ cursor.Close();
+ txn.Commit();
+ db.Close();
+ env.Close();
+ }
+ }
+
+ [Test]
+ public void TestInsertAfter()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ DatabaseEntry data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+
+ testName = "TestInsertAfter";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Add record("key", "data") into the database.
+ CursorTest.GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out cursor);
+ CursorTest.AddOneByCursor(db, cursor);
+
+ // Insert the new record("key","data1") after the record("key", "data").
+ data = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data1"));
+ cursor.Insert(data, Cursor.InsertLocation.AFTER);
+
+ /*
+ * Move the cursor to the record("key", "data") and confirm that
+ * the next record is the one just inserted.
+ */
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data")));
+ Assert.IsTrue(cursor.Move(pair, true));
+ Assert.IsTrue(cursor.MoveNext());
+ Assert.AreEqual(ASCIIEncoding.ASCII.GetBytes("key"), cursor.Current.Key.Data);
+ Assert.AreEqual(ASCIIEncoding.ASCII.GetBytes("data1"), cursor.Current.Value.Data);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveFirstMultipleAndMultipleKey()
+ {
+ testName = "TestMoveFirstMultipleAndMultipleKey";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ KeyValuePair<DatabaseEntry, MultipleDatabaseEntry> pair;
+ MultipleKeyDatabaseEntry multiPair;
+ int cnt;
+ int[] size = new int[2];
+ size[0] = 0;
+ size[1] = 1024;
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.ALWAYS;
+ dbConfig.Duplicates = DuplicatesPolicy.UNSORTED;
+ dbConfig.PageSize = 1024;
+ GetMultipleDB(btreeDBFileName, dbConfig, out db, out cursor);
+
+ for (int i = 0; i < 2; i++) {
+ cnt = 0;
+ if (size[i] == 0)
+ cursor.MoveFirstMultiple();
+ else
+ cursor.MoveFirstMultiple(size[i]);
+ pair = cursor.CurrentMultiple;
+ foreach (DatabaseEntry dbt in pair.Value)
+ cnt++;
+ Assert.AreEqual(1, cnt);
+ }
+
+ for (int i = 0; i < 2; i++) {
+ cnt = 0;
+ if (size[i] == 0)
+ cursor.MoveFirstMultipleKey();
+ else
+ cursor.MoveFirstMultipleKey(size[i]);
+ multiPair = cursor.CurrentMultipleKey;
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry>
+ dbt in multiPair)
+ cnt++;
+ Assert.Less(1, cnt);
+ }
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveMultiple()
+ {
+ testName = "TestMoveMultiple";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ DatabaseEntry key;
+ KeyValuePair<DatabaseEntry, MultipleDatabaseEntry> pair;
+ int cnt;
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.ALWAYS;
+ dbConfig.Duplicates = DuplicatesPolicy.UNSORTED;
+ dbConfig.PageSize = 1024;
+ GetMultipleDB(btreeDBFileName, dbConfig, out db, out cursor);
+
+ // Move the cursor to the pairs whose key is exactly 99.
+ cnt = 0;
+ key = new DatabaseEntry(BitConverter.GetBytes((int)99));
+ cursor.MoveMultiple(key, true);
+ pair = cursor.CurrentMultiple;
+ Assert.AreEqual(99, BitConverter.ToInt32(pair.Key.Data, 0));
+ foreach (DatabaseEntry dbt in pair.Value)
+ cnt++;
+ Assert.AreEqual(2, cnt);
+
+ // Move the cursor to the pairs with the smallest key larger than 100.
+ cnt = 0;
+ key = new DatabaseEntry(BitConverter.GetBytes((int)100));
+ cursor.MoveMultiple(key, false);
+ pair = cursor.CurrentMultiple;
+ Assert.AreEqual(101, BitConverter.ToInt32(pair.Key.Data, 0));
+ foreach (DatabaseEntry dbt in pair.Value)
+ cnt++;
+ Assert.AreEqual(1, cnt);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveMultipleKey()
+ {
+ testName = "TestMoveMultipleKey";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ DatabaseEntry key;
+ MultipleKeyDatabaseEntry mulPair;
+ int cnt;
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.ALWAYS;
+ dbConfig.Duplicates = DuplicatesPolicy.UNSORTED;
+ dbConfig.PageSize = 1024;
+ GetMultipleDB(btreeDBFileName, dbConfig, out db, out cursor);
+
+ /*
+ * Bulk retrieve key/value pairs starting from the pair
+ * whose key is exactly 99.
+ */
+ cnt = 0;
+ key = new DatabaseEntry(BitConverter.GetBytes((int)99));
+ cursor.MoveMultipleKey(key, true);
+ mulPair = cursor.CurrentMultipleKey;
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry>
+ pair in mulPair) {
+ Assert.GreaterOrEqual(3,
+ BitConverter.ToInt32(pair.Key.Data, 0) - 98);
+ cnt++;
+ }
+ Assert.AreEqual(3, cnt);
+
+ /*
+ * Bulk retrieve key/value pairs starting from the pair
+ * whose key is the smallest one larger than 100.
+ */
+ cnt = 0;
+ key = new DatabaseEntry(BitConverter.GetBytes((int)100));
+ cursor.MoveMultipleKey(key, false);
+ mulPair = cursor.CurrentMultipleKey;
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry>
+ pair in mulPair) {
+ Assert.AreEqual(101,
+ BitConverter.ToInt32(pair.Key.Data, 0));
+ cnt++;
+ }
+ Assert.LessOrEqual(1, cnt);
+
+ cnt = 0;
+ key = new DatabaseEntry(BitConverter.GetBytes((int)100));
+ cursor.MoveMultipleKey(key, false, 1024);
+ mulPair = cursor.CurrentMultipleKey;
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry>
+ pair in mulPair) {
+ Assert.AreEqual(101,
+ BitConverter.ToInt32(pair.Key.Data, 0));
+ Assert.AreEqual(101,
+ BitConverter.ToInt32(pair.Value.Data, 0));
+ cnt++;
+ }
+ Assert.LessOrEqual(1, cnt);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveMultipleKeyWithRecno()
+ {
+ testName = "TestMoveMultipleKeyWithRecno";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ MultipleKeyDatabaseEntry multiDBT;
+ int cnt;
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.UseRecordNumbers = true;
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.PageSize = 1024;
+ GetMultipleDB(btreeDBFileName, dbConfig, out db, out cursor);
+
+ cnt = 0;
+ cursor.MoveMultipleKey(98);
+ multiDBT = cursor.CurrentMultipleKey;
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry>
+ pair in multiDBT)
+ cnt++;
+ Assert.AreEqual(3, cnt);
+
+ cnt = 0;
+ cursor.MoveMultipleKey(98, 1024);
+ multiDBT = cursor.CurrentMultipleKey;
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry>
+ pair in multiDBT)
+ cnt++;
+ Assert.AreEqual(3, cnt);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveMultiplePairs()
+ {
+ testName = "TestMoveMultiplePairs";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ DatabaseEntry key, data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ MultipleKeyDatabaseEntry multiKeyDBTs1, multiKeyDBTs2;
+ int cnt;
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.ALWAYS;
+ dbConfig.Duplicates = DuplicatesPolicy.SORTED;
+ dbConfig.PageSize = 1024;
+ GetMultipleDB(btreeDBFileName, dbConfig, out db, out cursor);
+
+ /*
+ * Bulk retrieve pairs starting from the pair whose
+ * key/data pair is exactly 99/99.
+ */
+ cnt = 0;
+ key = new DatabaseEntry(BitConverter.GetBytes((int)99));
+ data = new DatabaseEntry(BitConverter.GetBytes((int)99));
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data);
+ cursor.MoveMultipleKey(pair, true);
+ multiKeyDBTs1 = cursor.CurrentMultipleKey;
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry>
+ p in multiKeyDBTs1)
+ cnt++;
+ Assert.AreEqual(3, cnt);
+
+ // Bulk retrieve pairs from the pair whose key is exactly 99.
+ cnt = 0;
+ key = new DatabaseEntry(BitConverter.GetBytes((int)99));
+ data = new DatabaseEntry(BitConverter.GetBytes((int)98));
+ cursor.MoveMultipleKey(pair, true);
+ multiKeyDBTs2 = cursor.CurrentMultipleKey;
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry>
+ dbts in multiKeyDBTs2)
+ cnt++;
+ Assert.AreEqual(3, cnt);
+
+ /*
+ * Bulk retrieve pairs from the pair whose key is
+ * exactly 99, using a 1024-byte buffer.
+ */
+ cnt = 0;
+ key = new DatabaseEntry(BitConverter.GetBytes((int)99));
+ data = new DatabaseEntry(BitConverter.GetBytes((int)102));
+ cursor.MoveMultipleKey(pair, true, 1024);
+ multiKeyDBTs2 = cursor.CurrentMultipleKey;
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry>
+ dbts in multiKeyDBTs2)
+ cnt++;
+ Assert.AreEqual(3, cnt);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveMultiplePairWithKey()
+ {
+ testName = "TestMoveMultiplePairWithKey";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ DatabaseEntry key, data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ KeyValuePair<DatabaseEntry, MultipleDatabaseEntry> mulPair;
+ int cnt;
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.ALWAYS;
+ dbConfig.Duplicates = DuplicatesPolicy.UNSORTED;
+ dbConfig.PageSize = 1024;
+ GetMultipleDB(btreeDBFileName, dbConfig, out db, out cursor);
+
+ /*
+ * Move the cursor to the pair whose key is exactly 99
+ * and whose data is 99.
+ */
+ cnt = 0;
+ key = new DatabaseEntry(BitConverter.GetBytes((int)99));
+ data = new DatabaseEntry(BitConverter.GetBytes((int)99));
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data);
+ cursor.MoveMultiple(pair, true);
+ mulPair = cursor.CurrentMultiple;
+ Assert.AreEqual(99, BitConverter.ToInt32(mulPair.Key.Data, 0));
+ foreach (DatabaseEntry dbt in mulPair.Value) {
+ Assert.AreEqual(99, BitConverter.ToInt32(dbt.Data, 0));
+ cnt++;
+ }
+ Assert.AreEqual(1, cnt);
+
+ // Move cursor to pairs with the smallest key larger than 100.
+ cnt = 0;
+ key = new DatabaseEntry(BitConverter.GetBytes((int)100));
+ data = new DatabaseEntry(BitConverter.GetBytes((int)100));
+ cursor.MoveMultiple(pair, false);
+ mulPair = cursor.CurrentMultiple;
+ Assert.AreEqual(99, BitConverter.ToInt32(mulPair.Key.Data, 0));
+ foreach (DatabaseEntry dbt in mulPair.Value) {
+ Assert.GreaterOrEqual(1,
+ BitConverter.ToInt32(dbt.Data, 0) - 99);
+ cnt++;
+ }
+ Assert.AreEqual(1, cnt);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveMultipleWithRecno()
+ {
+ testName = "TestMoveMultipleWithRecno";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ KeyValuePair<DatabaseEntry, MultipleDatabaseEntry> pair;
+ int cnt;
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.ALWAYS;
+ dbConfig.PageSize = 1024;
+ dbConfig.UseRecordNumbers = true;
+ GetMultipleDB(btreeDBFileName, dbConfig, out db, out cursor);
+
+ // Move the cursor to record number 100.
+ cnt = 0;
+ cursor.MoveMultiple(100);
+ pair = cursor.CurrentMultiple;
+ Assert.AreEqual(100, BitConverter.ToInt32(pair.Key.Data, 0));
+ foreach (DatabaseEntry dbt in pair.Value)
+ cnt++;
+ Assert.AreEqual(1, cnt);
+
+ // Move the cursor to record number 100, using a 1024-byte buffer.
+ cnt = 0;
+ cursor.MoveMultiple(100, 1024);
+ pair = cursor.CurrentMultiple;
+ Assert.AreEqual(100, BitConverter.ToInt32(pair.Key.Data, 0));
+ foreach (DatabaseEntry dbt in pair.Value)
+ cnt++;
+ Assert.AreEqual(1, cnt);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveNextDuplicateMultipleAndMultipleKey()
+ {
+ testName = "TestMoveNextDuplicateMultipleAndMultipleKey";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ DatabaseEntry key, data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ KeyValuePair<DatabaseEntry, MultipleDatabaseEntry> pairs;
+ MultipleKeyDatabaseEntry multiPair;
+ int cnt;
+ int[] size = new int[2];
+ size[0] = 0;
+ size[1] = 1024;
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.ALWAYS;
+ dbConfig.Duplicates = DuplicatesPolicy.SORTED;
+ dbConfig.PageSize = 1024;
+ GetMultipleDB(btreeDBFileName, dbConfig, out db, out cursor);
+
+ key = new DatabaseEntry(BitConverter.GetBytes(99));
+ data = new DatabaseEntry(BitConverter.GetBytes(99));
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data);
+
+ for (int j = 0; j < 2; j++) {
+ for (int i = 0; i < 2; i++) {
+ cnt = 0;
+ cursor.Move(pair, true);
+ if (j == 0) {
+ if (size[i] == 0)
+ Assert.IsTrue(cursor.MoveNextDuplicateMultiple());
+ else
+ Assert.IsTrue(cursor.MoveNextDuplicateMultiple(size[i]));
+ pairs = cursor.CurrentMultiple;
+ foreach (DatabaseEntry dbt in pairs.Value) {
+ Assert.AreEqual(100, BitConverter.ToInt32(dbt.Data, 0));
+ cnt++;
+ }
+ Assert.AreEqual(1, cnt);
+ } else {
+ if (size[i] == 0)
+ Assert.IsTrue(cursor.MoveNextDuplicateMultipleKey());
+ else
+ Assert.IsTrue(cursor.MoveNextDuplicateMultipleKey(size[i]));
+ multiPair = cursor.CurrentMultipleKey;
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry> p in multiPair) {
+ Assert.AreEqual(100, BitConverter.ToInt32(p.Value.Data, 0));
+ cnt++;
+ }
+ Assert.AreEqual(1, cnt);
+ }
+ }
+ }
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveNextUniqueMultipleAndMultipleKey()
+ {
+ testName = "TestMoveNextUniqueMultipleAndMultipleKey";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ DatabaseEntry key, data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ KeyValuePair<DatabaseEntry, MultipleDatabaseEntry> pairs;
+ MultipleKeyDatabaseEntry multiPair;
+ int cnt;
+ int[] size = new int[2];
+ size[0] = 0;
+ size[1] = 1024;
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.ALWAYS;
+ dbConfig.Duplicates = DuplicatesPolicy.UNSORTED;
+ dbConfig.PageSize = 1024;
+ GetMultipleDB(btreeDBFileName, dbConfig, out db, out cursor);
+
+ key = new DatabaseEntry(BitConverter.GetBytes(99));
+ data = new DatabaseEntry(BitConverter.GetBytes(99));
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data);
+
+ for (int j = 0; j < 2; j++) {
+ for (int i = 0; i < 2; i++) {
+ cnt = 0;
+ cursor.Move(pair, true);
+ if (j == 0) {
+ if (size[i] == 0)
+ Assert.IsTrue(cursor.MoveNextUniqueMultiple());
+ else
+ Assert.IsTrue(cursor.MoveNextUniqueMultiple(size[i]));
+ pairs = cursor.CurrentMultiple;
+ foreach (DatabaseEntry dbt in pairs.Value) {
+ Assert.AreEqual(101, BitConverter.ToInt32(dbt.Data, 0));
+ cnt++;
+ }
+ Assert.AreEqual(1, cnt);
+ } else {
+ if (size[i] == 0)
+ Assert.IsTrue(cursor.MoveNextUniqueMultipleKey());
+ else
+ Assert.IsTrue(cursor.MoveNextUniqueMultipleKey(size[i]));
+ multiPair = cursor.CurrentMultipleKey;
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry> p in multiPair) {
+ Assert.AreEqual(101, BitConverter.ToInt32(p.Value.Data, 0));
+ cnt++;
+ }
+ Assert.AreEqual(1, cnt);
+ }
+ }
+ }
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestRefreshMultipleAndMultipleKey()
+ {
+ testName = "TestRefreshMultipleAndMultipleKey";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ DatabaseEntry key, data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ KeyValuePair<DatabaseEntry, MultipleDatabaseEntry> pairs;
+ MultipleKeyDatabaseEntry multiPair;
+ int cnt;
+ int[] size = new int[2];
+ size[0] = 0;
+ size[1] = 1024;
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.ALWAYS;
+ dbConfig.Duplicates = DuplicatesPolicy.SORTED;
+ dbConfig.PageSize = 1024;
+ GetMultipleDB(btreeDBFileName, dbConfig, out db, out cursor);
+
+ key = new DatabaseEntry(BitConverter.GetBytes(99));
+ data = new DatabaseEntry(BitConverter.GetBytes(99));
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data);
+
+ for (int j = 0; j < 2; j++) {
+ for (int i = 0; i < 2; i++) {
+ cnt = 0;
+ cursor.Move(pair, true);
+ if (j == 0) {
+ if (size[i] == 0)
+ Assert.IsTrue(cursor.RefreshMultiple());
+ else
+ Assert.IsTrue(cursor.RefreshMultiple(size[i]));
+ pairs = cursor.CurrentMultiple;
+ foreach (DatabaseEntry dbt in pairs.Value)
+ cnt++;
+ Assert.AreEqual(2, cnt);
+ } else {
+ if (size[i] == 0)
+ Assert.IsTrue(cursor.RefreshMultipleKey());
+ else
+ Assert.IsTrue(cursor.RefreshMultipleKey(size[i]));
+ multiPair = cursor.CurrentMultipleKey;
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry> p in multiPair)
+ cnt++;
+ Assert.AreEqual(3, cnt);
+ }
+ }
+ }
+
+ cursor.Close();
+ db.Close();
+ }
+
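+ /*
+ * Open the database with the given config and load integer
+ * key/data pairs 1..99 through the cursor. Record-number
+ * databases then get a 512-byte record with key 100; other
+ * databases get a duplicate under key 99 (when duplicates are
+ * allowed) and a record with key 101.
+ */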
+ private void GetMultipleDB(string dbFileName, BTreeDatabaseConfig dbConfig,
+ out BTreeDatabase db, out BTreeCursor cursor)
+ {
+ db = BTreeDatabase.Open(dbFileName, dbConfig);
+ cursor = db.Cursor();
+
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ DatabaseEntry key, data;
+ for (int i = 1; i < 100; i++) {
+ key = new DatabaseEntry(BitConverter.GetBytes(i));
+ data = new DatabaseEntry(BitConverter.GetBytes(i));
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data);
+ cursor.Add(pair);
+ }
+
+ if (dbConfig.UseRecordNumbers == true) {
+ byte[] bytes = new byte[512];
+ for (int i = 0; i < 512; i++)
+ bytes[i] = (byte)i;
+ key = new DatabaseEntry(BitConverter.GetBytes(100));
+ data = new DatabaseEntry(bytes);
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data);
+ cursor.Add(pair);
+ } else {
+ if (dbConfig.Duplicates == DuplicatesPolicy.UNSORTED ||
+ dbConfig.Duplicates == DuplicatesPolicy.SORTED) {
+ key = new DatabaseEntry(BitConverter.GetBytes(99));
+ data = new DatabaseEntry(BitConverter.GetBytes(100));
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data);
+ cursor.Add(pair);
+ }
+
+ key = new DatabaseEntry(BitConverter.GetBytes(101));
+ data = new DatabaseEntry(BitConverter.GetBytes(101));
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data);
+ cursor.Add(pair);
+ }
+ }
+
+ [Test]
+ public void TestInsertBefore()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ DatabaseEntry data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+
+ testName = "TestInsertBefore";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Add record("key", "data") into the database.
+ CursorTest.GetCursorInBtreeDBWithoutEnv(
+ testHome, testName, out db, out cursor);
+ CursorTest.AddOneByCursor(db, cursor);
+
+ // Insert the new record("key","data1") before the record("key", "data").
+ data = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data1"));
+ cursor.Insert(data, Cursor.InsertLocation.BEFORE);
+
+ /*
+ * Move the cursor to the record("key", "data") and confirm
+ * that the previous record is the one just inserted.
+ */
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data")));
+ Assert.IsTrue(cursor.Move(pair, true));
+ Assert.IsTrue(cursor.MovePrev());
+ Assert.AreEqual(ASCIIEncoding.ASCII.GetBytes("key"),cursor.Current.Key.Data);
+ Assert.AreEqual(ASCIIEncoding.ASCII.GetBytes("data1"), cursor.Current.Value.Data);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveToRecno()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestMoveToRecno";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ GetCursorInBtreeDBUsingRecno(testHome, testName,
+ out db, out cursor);
+ for (int i = 0; i < 10; i++)
+ db.Put(
+ new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)));
+
+ MoveCursorToRecno(cursor, null);
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveToRecnoWithRMW()
+ {
+ testName = "TestMoveToRecnoWithRMW";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ btCursorFunc = new BTCursorMoveFuncDelegate(
+ MoveCursorToRecno);
+
+ // Move the cursor by record number under read-modify-write locking.
+ MoveWithRMW(testHome, testName);
+ }
+
+ [Test]
+ public void TestRecno()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestRecno";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ GetCursorInBtreeDBUsingRecno(testHome, testName,
+ out db, out cursor);
+ for (int i = 0; i < 10; i++)
+ db.Put(
+ new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)));
+
+ ReturnRecno(cursor, null);
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestRecnoWithRMW()
+ {
+ testName = "TestRecnoWithRMW";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ // Use ReturnRecno() as the cursor move function.
+ btCursorFunc = new BTCursorMoveFuncDelegate(
+ ReturnRecno);
+
+ // Move the cursor by record number and verify the record number under read-modify-write locking.
+ MoveWithRMW(testHome, testName);
+ }
+
+ /*
+ * Move the cursor by record number. Record numbers
+ * start at 1 and increase by 1.
+ */
+ public void MoveCursorToRecno(BTreeCursor cursor,
+ LockingInfo lck)
+ {
+ for (uint i = 1; i <= 5; i++)
+ if (lck == null)
+ Assert.IsTrue(cursor.Move(i));
+ else
+ Assert.IsTrue(cursor.Move(i, lck));
+ }
+
+ /*
+ * Move the cursor to a given record number and read back
+ * the current record's number. The number given and the
+ * one returned by the cursor should be the same.
+ */
+ public void ReturnRecno(BTreeCursor cursor,
+ LockingInfo lck)
+ {
+ for (uint i = 1; i <= 5; i++)
+ if (lck == null)
+ {
+ if (cursor.Move(i) == true)
+ Assert.AreEqual(i, cursor.Recno());
+ }
+ else
+ {
+ if (cursor.Move(i, lck) == true)
+ Assert.AreEqual(i, cursor.Recno(lck));
+ }
+ }
+
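+ /*
+ * Open a btree database configured to support record numbers
+ * and return both the database handle and a cursor on it.
+ */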
+ public void GetCursorInBtreeDBUsingRecno(string home,
+ string name, out BTreeDatabase db,
+ out BTreeCursor cursor)
+ {
+ string dbFileName = home + "/" + name + ".db";
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.UseRecordNumbers = true;
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ db = BTreeDatabase.Open(dbFileName, dbConfig);
+ cursor = db.Cursor();
+ }
+
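+ /*
+ * Read a record through the cursor-movement callback under
+ * read-modify-write locking, wait for the signal, then add a
+ * new record and commit. Abort the transaction if a deadlock
+ * is detected.
+ */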
+ public void RdMfWt()
+ {
+ Transaction txn = paramEnv.BeginTransaction();
+ BTreeCursor dbc = paramDB.Cursor(txn);
+
+ try
+ {
+ LockingInfo lck = new LockingInfo();
+ lck.ReadModifyWrite = true;
+
+ // Read record.
+ btCursorFunc(dbc, lck);
+
+ // Block the current thread until event is set.
+ signal.WaitOne();
+
+ // Write a new record into the database.
+ DatabaseEntry key = new DatabaseEntry(BitConverter.GetBytes(55));
+ DatabaseEntry data = new DatabaseEntry(BitConverter.GetBytes(55));
+ dbc.Add(new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data));
+
+ dbc.Close();
+ txn.Commit();
+ }
+ catch (DeadlockException)
+ {
+ dbc.Close();
+ txn.Abort();
+ }
+ }
+
+
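+ /*
+ * Open a transactional environment and a record-number btree,
+ * load some records, then run RdMfWt() in two threads and
+ * verify that read-modify-write locking prevents deadlocks.
+ */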
+ public void MoveWithRMW(string home, string name)
+ {
+ paramEnv = null;
+ paramDB = null;
+
+ // Open the environment.
+ DatabaseEnvironmentConfig envCfg =
+ new DatabaseEnvironmentConfig();
+ envCfg.Create = true;
+ envCfg.FreeThreaded = true;
+ envCfg.UseLocking = true;
+ envCfg.UseLogging = true;
+ envCfg.UseMPool = true;
+ envCfg.UseTxns = true;
+ paramEnv = DatabaseEnvironment.Open(home, envCfg);
+
+ // Open database in transaction.
+ Transaction openTxn = paramEnv.BeginTransaction();
+ BTreeDatabaseConfig cfg = new BTreeDatabaseConfig();
+ cfg.Creation = CreatePolicy.ALWAYS;
+ cfg.Env = paramEnv;
+ cfg.FreeThreaded = true;
+ cfg.PageSize = 4096;
+ // Use record number.
+ cfg.UseRecordNumbers = true;
+ paramDB = BTreeDatabase.Open(name + ".db", cfg, openTxn);
+ openTxn.Commit();
+
+ /*
+ * Put 10 distinct records, 2 duplicate records, and one
+ * more distinct record into the database.
+ */
+ Transaction txn = paramEnv.BeginTransaction();
+ for (int i = 0; i < 13; i++)
+ {
+ DatabaseEntry key, data;
+ if (i == 10 || i == 11)
+ {
+ key = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key"));
+ data = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data"));
+ }
+ else
+ {
+ key = new DatabaseEntry(BitConverter.GetBytes(i));
+ data = new DatabaseEntry(BitConverter.GetBytes(i));
+ }
+ paramDB.Put(key, data, txn);
+ }
+
+ txn.Commit();
+
+ // Create an event wait handle.
+ signal = new EventWaitHandle(false,
+ EventResetMode.ManualReset);
+
+ /*
+ * Start RdMfWt() in two threads. RdMfWt() reads and
+ * writes data in the database.
+ */
+ Thread t1 = new Thread(new ThreadStart(RdMfWt));
+ Thread t2 = new Thread(new ThreadStart(RdMfWt));
+ t1.Start();
+ t2.Start();
+
+ // Give both threads time to read before signalling them to write.
+ Thread.Sleep(1000);
+
+ // Invoke the write operation in both threads.
+ signal.Set();
+
+ // Check for deadlocks while the threads run.
+ while (t1.IsAlive || t2.IsAlive)
+ {
+ /*
+ * Give both threads time to write before
+ * counting the number of deadlocks.
+ */
+ Thread.Sleep(1000);
+ uint deadlocks = paramEnv.DetectDeadlocks(DeadlockPolicy.DEFAULT);
+
+ // Confirm that no deadlocks occurred.
+ Assert.AreEqual(0, deadlocks);
+ }
+
+ t1.Join();
+ t2.Join();
+ paramDB.Close();
+ paramEnv.Close();
+ }
+
+ }
+}
+
+
diff --git a/db-4.8.30/test/scr037/BTreeDatabaseConfigTest.cs b/db-4.8.30/test/scr037/BTreeDatabaseConfigTest.cs
new file mode 100644
index 0000000..69170dd
--- /dev/null
+++ b/db-4.8.30/test/scr037/BTreeDatabaseConfigTest.cs
@@ -0,0 +1,92 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class BTreeDatabaseConfigTest : DatabaseConfigTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "BTreeDatabaseConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ override public void TestConfigWithoutEnv()
+ {
+ testName = "TestConfigWithoutEnv";
+
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ BTreeDatabaseConfig btreeConfig =
+ new BTreeDatabaseConfig();
+ Config(xmlElem, ref btreeConfig, true);
+ Confirm(xmlElem, btreeConfig, true);
+ }
+
+ public static void Confirm(XmlElement
+ xmlElement, BTreeDatabaseConfig btreeDBConfig,
+ bool compulsory)
+ {
+ DatabaseConfig dbConfig = btreeDBConfig;
+ Confirm(xmlElement, dbConfig, compulsory);
+
+ // Confirm Btree database specific configuration
+ Configuration.ConfirmDuplicatesPolicy(xmlElement,
+ "Duplicates", btreeDBConfig.Duplicates, compulsory);
+ Configuration.ConfirmBool(xmlElement,
+ "NoReverseSplitting",
+ btreeDBConfig.NoReverseSplitting, compulsory);
+ Configuration.ConfirmBool(xmlElement,
+ "UseRecordNumbers", btreeDBConfig.UseRecordNumbers,
+ compulsory);
+ Configuration.ConfirmCreatePolicy(xmlElement,
+ "Creation", btreeDBConfig.Creation, compulsory);
+ Configuration.ConfirmUint(xmlElement, "MinKeysPerPage",
+ btreeDBConfig.MinKeysPerPage, compulsory);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref BTreeDatabaseConfig btreeDBConfig, bool compulsory)
+ {
+ uint minKeysPerPage = new uint();
+ DatabaseConfig dbConfig = btreeDBConfig;
+ Config(xmlElement, ref dbConfig, compulsory);
+
+ // Configure specific fields/properties of Btree db
+ Configuration.ConfigDuplicatesPolicy(xmlElement,
+ "Duplicates", ref btreeDBConfig.Duplicates,
+ compulsory);
+ Configuration.ConfigBool(xmlElement,
+ "NoReverseSplitting",
+ ref btreeDBConfig.NoReverseSplitting, compulsory);
+ Configuration.ConfigBool(xmlElement,
+ "UseRecordNumbers",
+ ref btreeDBConfig.UseRecordNumbers, compulsory);
+ Configuration.ConfigCreatePolicy(xmlElement,
+ "Creation", ref btreeDBConfig.Creation, compulsory);
+ if (Configuration.ConfigUint(xmlElement,
+ "MinKeysPerPage", ref minKeysPerPage, compulsory))
+ btreeDBConfig.MinKeysPerPage = minKeysPerPage;
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/BTreeDatabaseTest.cs b/db-4.8.30/test/scr037/BTreeDatabaseTest.cs
new file mode 100644
index 0000000..2643f41
--- /dev/null
+++ b/db-4.8.30/test/scr037/BTreeDatabaseTest.cs
@@ -0,0 +1,2309 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class BTreeDatabaseTest : DatabaseTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "BTreeDatabaseTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestCompactWithoutTxn()
+ {
+ int i, nRecs;
+ nRecs = 10000;
+ testName = "TestCompactWithoutTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ // The minimum page size
+ btreeDBConfig.PageSize = 512;
+ btreeDBConfig.BTreeCompare =
+ new EntryComparisonDelegate(dbIntCompare);
+ using (BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBConfig))
+ {
+ DatabaseEntry key;
+ DatabaseEntry data;
+
+ // Fill the database with entries from 0 to 9999
+ for (i = 0; i < nRecs; i++)
+ {
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ btreeDB.Put(key, data);
+ }
+
+ /*
+ * Delete entries below 500, between 3000 and
+ * 5000 and above 7000
+ */
+ for (i = 0; i < nRecs; i++)
+ if (i < 500 || i > 7000 ||
+ (i < 5000 && i > 3000))
+ {
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ btreeDB.Delete(key);
+ }
+
+ btreeDB.Sync();
+ long fileSize = new FileInfo(
+ btreeDBFileName).Length;
+
+ // Compact database
+ CompactConfig cCfg = new CompactConfig();
+ cCfg.FillPercentage = 30;
+ cCfg.Pages = 10;
+ cCfg.Timeout = 1000;
+ cCfg.TruncatePages = true;
+ cCfg.start = new DatabaseEntry(
+ BitConverter.GetBytes(1));
+ cCfg.stop = new DatabaseEntry(
+ BitConverter.GetBytes(7000));
+ CompactData compactData = btreeDB.Compact(cCfg);
+ Assert.IsFalse((compactData.Deadlocks == 0) &&
+ (compactData.Levels == 0) &&
+ (compactData.PagesExamined == 0) &&
+ (compactData.PagesFreed == 0) &&
+ (compactData.PagesTruncated == 0));
+
+ btreeDB.Sync();
+ long compactedFileSize =
+ new FileInfo(btreeDBFileName).Length;
+ Assert.Less(compactedFileSize, fileSize);
+ }
+ }
+
+ [Test]
+ public void TestCompression() {
+ testName = "TestCompression";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig cfg = new BTreeDatabaseConfig();
+ cfg.Creation = CreatePolicy.ALWAYS;
+ cfg.SetCompression(compress, decompress);
+ BTreeDatabase db = BTreeDatabase.Open(btreeDBName, cfg);
+ DatabaseEntry key, data;
+ char[] keyData = { 'A', 'A', 'A', 'A' };
+ byte[] dataData = new byte[20];
+ Random generator = new Random();
+ int i;
+ for (i = 0; i < 20000; i++) {
+ // Write random data
+ key = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes(keyData));
+ generator.NextBytes(dataData);
+ data = new DatabaseEntry(dataData);
+ db.Put(key, data);
+
+ // Bump the key. Roll over from Z to A if necessary.
+ int j = keyData.Length;
+ do {
+ j--;
+ if (keyData[j]++ == 'Z')
+ keyData[j] = 'A';
+ } while (keyData[j] == 'A');
+ }
+ db.Close();
+ }
+
+ bool compress(DatabaseEntry prevKey, DatabaseEntry prevData,
+ DatabaseEntry key, DatabaseEntry data, ref byte[] dest, out int size) {
+ /*
+ * A dummy callback that does no compression. It simply writes
+ * the 4-byte key and 20-byte data to the buffer.
+ */
+ size = key.Data.Length + data.Data.Length;
+ if (size > dest.Length)
+ return false;
+ key.Data.CopyTo(dest, 0);
+ data.Data.CopyTo(dest, key.Data.Length);
+ return true;
+ }
+
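+ /*
+ * The matching dummy decompression callback: read back the
+ * 4-byte key and 20-byte data written by compress() and report
+ * how many bytes were consumed.
+ */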
+ KeyValuePair<DatabaseEntry, DatabaseEntry> decompress(
+ DatabaseEntry prevKey, DatabaseEntry prevData, byte[] compressed, out uint bytesRead) {
+ byte[] keyData = new byte[4];
+ byte[] dataData = new byte[20];
+ Array.ConstrainedCopy(compressed, 0, keyData, 0, 4);
+ Array.ConstrainedCopy(compressed, 4, dataData, 0, 20);
+ DatabaseEntry key = new DatabaseEntry(keyData);
+ DatabaseEntry data = new DatabaseEntry(dataData);
+ bytesRead = (uint)(key.Data.Length + data.Data.Length);
+ return new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data);
+ }
+
+ [Test]
+ public void TestCompressionDefault() {
+ testName = "TestCompressionDefault";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
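+			// First pass: load 20,000 records without compression and record the file size.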
+ BTreeDatabaseConfig cfg = new BTreeDatabaseConfig();
+ cfg.Creation = CreatePolicy.ALWAYS;
+ BTreeDatabase db = BTreeDatabase.Open(btreeDBName, cfg);
+ DatabaseEntry key, data;
+ char[] keyData = { 'A', 'A', 'A', 'A' };
+ byte[] dataData = new byte[20];
+ Random generator = new Random();
+ int i;
+ for (i = 0; i < 20000; i++) {
+ // Write random data
+ key = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes(keyData));
+ generator.NextBytes(dataData);
+ data = new DatabaseEntry(dataData);
+ db.Put(key, data);
+
+ // Bump the key. Rollover from Z to A if necessary
+ int j = keyData.Length;
+ do {
+ j--;
+ if (keyData[j]++ == 'Z')
+ keyData[j] = 'A';
+ } while (keyData[j] == 'A');
+ }
+ db.Close();
+
+ FileInfo dbInfo = new FileInfo(btreeDBName);
+ long uncompressedSize = dbInfo.Length;
+ Configuration.ClearDir(testHome);
+
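+			// Second pass: load the same keys with the library's default compression enabled.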
+ cfg = new BTreeDatabaseConfig();
+ cfg.Creation = CreatePolicy.ALWAYS;
+ cfg.SetCompression();
+ db = BTreeDatabase.Open(btreeDBName, cfg);
+ keyData = new char[]{ 'A', 'A', 'A', 'A' };
+ for (i = 0; i < 20000; i++) {
+ // Write random data
+ key = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes(keyData));
+ generator.NextBytes(dataData);
+ data = new DatabaseEntry(dataData);
+ db.Put(key, data);
+
+ // Bump the key. Rollover from Z to A if necessary
+ int j = keyData.Length;
+ do {
+ j--;
+ if (keyData[j]++ == 'Z')
+ keyData[j] = 'A';
+ } while (keyData[j] == 'A');
+ }
+ Cursor dbc = db.Cursor();
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry> kvp in dbc)
+ i--;
+ dbc.Close();
+ Assert.AreEqual(i, 0);
+ db.Close();
+
+ dbInfo = new FileInfo(btreeDBName);
+ Assert.Less(dbInfo.Length, uncompressedSize);
+ Console.WriteLine("Uncompressed: {0}", uncompressedSize);
+ Console.WriteLine("Compressed: {0}", dbInfo.Length);
+
+ Configuration.ClearDir(testHome);
+
+ cfg = new BTreeDatabaseConfig();
+ cfg.Creation = CreatePolicy.ALWAYS;
+ cfg.SetCompression();
+ db = BTreeDatabase.Open(btreeDBName, cfg);
+ for (i = 1023; i < 1124; i++){
+ key = new DatabaseEntry(BitConverter.GetBytes(i));
+ data = new DatabaseEntry(BitConverter.GetBytes(i + 3));
+ db.Put(key, data);
+ }
+ dbc = db.Cursor();
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry> kvp in dbc){
+ int keyInt = BitConverter.ToInt32(kvp.Key.Data, 0);
+ int dataInt = BitConverter.ToInt32(kvp.Value.Data, 0);
+ Assert.AreEqual(3, dataInt - keyInt);
+ }
+ dbc.Close();
+
+ db.Close();
+ }
+
+ [Test, ExpectedException(typeof(AccessViolationException))]
+ public void TestClose()
+ {
+ testName = "TestClose";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBConfig);
+ btreeDB.Close();
+ DatabaseEntry key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("hi"));
+ DatabaseEntry data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("hi"));
+ btreeDB.Put(key, data);
+ }
+
+ [Test]
+ public void TestCloseWithoutSync()
+ {
+ testName = "TestCloseWithoutSync";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBName = testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.ForceFlush = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+ envConfig.UseLogging = true;
+ envConfig.LogSystemCfg = new LogConfig();
+ envConfig.LogSystemCfg.ForceSync = false;
+ envConfig.LogSystemCfg.AutoRemove = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ TransactionConfig txnConfig = new TransactionConfig();
+ txnConfig.SyncAction =
+ TransactionConfig.LogFlush.WRITE_NOSYNC;
+ Transaction txn = env.BeginTransaction(txnConfig);
+
+ BTreeDatabaseConfig btreeConfig =
+ new BTreeDatabaseConfig();
+ btreeConfig.Creation = CreatePolicy.ALWAYS;
+ btreeConfig.Env = env;
+
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBName, btreeConfig, txn);
+
+ DatabaseEntry key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ DatabaseEntry data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+ Assert.IsFalse(btreeDB.Exists(key, txn));
+ btreeDB.Put(key, data, txn);
+ btreeDB.Close(false);
+ txn.Commit();
+ env.Close();
+
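+			/*
+			 * The database was closed without a sync, so the put should
+			 * not have reached the database file on disk.
+			 */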
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.NEVER;
+ using (BTreeDatabase db = BTreeDatabase.Open(
+ testHome + "/" + btreeDBName, dbConfig))
+ {
+ Assert.IsFalse(db.Exists(key));
+ }
+ }
+
+ [Test]
+ public void TestCursorWithoutEnv()
+ {
+ BTreeCursor cursor;
+ BTreeDatabase db;
+ string dbFileName;
+
+ testName = "TestCursorWithoutEnv";
+ testHome = testFixtureHome + "/" + testName;
+ dbFileName = testHome + "/" + testName + ".db";
+
+ // Open btree database.
+ Configuration.ClearDir(testHome);
+ OpenBtreeDB(null, null, dbFileName, out db);
+
+ // Get a cursor.
+ cursor = db.Cursor();
+
+ /*
+ * Add a record to the database with cursor and
+ * confirm that the record exists in the database.
+ */
+ CursorTest.AddOneByCursor(db, cursor);
+
+ // Close cursor and database.
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestCursorWithConfigInTxn()
+ {
+ BTreeCursor cursor;
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ Transaction txn;
+ string dbFileName;
+
+ testName = "TestCursorWithConfigInTxn";
+ testHome = testFixtureHome + "/" + testName;
+ dbFileName = testName + ".db";
+
+ // Open environment and begin a transaction.
+ Configuration.ClearDir(testHome);
+
+ SetUpEnvAndTxn(testHome, out env, out txn);
+ OpenBtreeDB(env, txn, dbFileName, out db);
+
+			// Configure and get a cursor.
+ cursor = db.Cursor(new CursorConfig(), txn);
+
+ /*
+ * Add a record to the database with cursor and
+ * confirm that the record exists in the database.
+ */
+ CursorTest.AddOneByCursor(db, cursor);
+
+ /*
+ * Close cursor, database, commit the transaction
+ * and close the environment.
+ */
+ cursor.Close();
+ db.Close();
+ txn.Commit();
+ env.Close();
+ }
+
+ [Test]
+ public void TestCursorWithoutConfigInTxn()
+ {
+ BTreeCursor cursor;
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ Transaction txn;
+ string dbFileName;
+
+ testName = "TestCursorWithoutConfigInTxn";
+ testHome = testFixtureHome + "/" + testName;
+ dbFileName = testName + ".db";
+
+ // Open environment and begin a transaction.
+ Configuration.ClearDir(testHome);
+ SetUpEnvAndTxn(testHome, out env, out txn);
+ OpenBtreeDB(env, txn, dbFileName, out db);
+
+ // Get a cursor in the transaction.
+ cursor = db.Cursor(txn);
+
+ /*
+ * Add a record to the database with cursor and
+ * confirm that the record exists in the database.
+ */
+ CursorTest.AddOneByCursor(db, cursor);
+
+ /*
+ * Close cursor, database, commit the transaction
+ * and close the environment.
+ */
+ cursor.Close();
+ db.Close();
+ txn.Commit();
+ env.Close();
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestDelete()
+ {
+ testName = "TestDelete";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBConfig);
+ DatabaseEntry key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ DatabaseEntry data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+ btreeDB.Put(key, data);
+ btreeDB.Delete(key);
+ try
+ {
+ btreeDB.Get(key);
+ }
+ catch (NotFoundException)
+ {
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ btreeDB.Close();
+ }
+ }
+
+ [Test]
+ public void TestExist()
+ {
+ testName = "TestExist";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase btreeDB;
+ using (btreeDB = BTreeDatabase.Open(
+ dbFileName, btreeDBConfig))
+ {
+ DatabaseEntry key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ DatabaseEntry data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+
+ btreeDB.Put(key, data);
+ Assert.IsTrue(btreeDB.Exists(
+ new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"))));
+ Assert.IsFalse(btreeDB.Exists(
+ new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"))));
+ }
+ }
+
+ [Test]
+ public void TestExistWithTxn()
+ {
+ BTreeDatabase btreeDB;
+ Transaction txn;
+ DatabaseEnvironmentConfig envConfig;
+ DatabaseEnvironment env;
+ DatabaseEntry key, data;
+
+ testName = "TestExistWithTxn";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Open an environment.
+ envConfig = new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ env = DatabaseEnvironment.Open(testHome, envConfig);
+
+ // Begin a transaction.
+ txn = env.BeginTransaction();
+
+ // Open a database.
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ btreeDBConfig.Env = env;
+ btreeDB = BTreeDatabase.Open(testName + ".db",
+ btreeDBConfig, txn);
+
+			// Put a key/data pair into the database.
+ key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+ btreeDB.Put(key, data, txn);
+
+ // Confirm that the pair exists in the database.
+ Assert.IsTrue(btreeDB.Exists(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key")), txn));
+ Assert.IsFalse(btreeDB.Exists(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data")), txn));
+
+ // Dispose all.
+ btreeDB.Close();
+ txn.Commit();
+ env.Close();
+ }
+
+
+ [Test]
+ public void TestExistWithLockingInfo()
+ {
+ BTreeDatabase btreeDB;
+ DatabaseEnvironment env;
+ DatabaseEntry key, data;
+
+ testName = "TestExistWithLockingInfo";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Open the environment.
+ DatabaseEnvironmentConfig envCfg =
+ new DatabaseEnvironmentConfig();
+ envCfg.Create = true;
+ envCfg.FreeThreaded = true;
+ envCfg.UseLocking = true;
+ envCfg.UseLogging = true;
+ envCfg.UseMPool = true;
+ envCfg.UseTxns = true;
+ env = DatabaseEnvironment.Open(
+ testHome, envCfg);
+
+ // Open database in transaction.
+ Transaction openTxn = env.BeginTransaction();
+ BTreeDatabaseConfig cfg =
+ new BTreeDatabaseConfig();
+ cfg.Creation = CreatePolicy.ALWAYS;
+ cfg.Env = env;
+ cfg.FreeThreaded = true;
+ cfg.PageSize = 4096;
+ cfg.Duplicates = DuplicatesPolicy.UNSORTED;
+ btreeDB = BTreeDatabase.Open(testName + ".db",
+ cfg, openTxn);
+ openTxn.Commit();
+
+			// Put a key/data pair into the database.
+ Transaction txn = env.BeginTransaction();
+ key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+ btreeDB.Put(key, data, txn);
+
+			// Create a LockingInfo that uses read-modify-write locking.
+ LockingInfo lockingInfo = new LockingInfo();
+ lockingInfo.ReadModifyWrite = true;
+
+			// Confirm that the pair exists in the database using the LockingInfo.
+ Assert.IsTrue(btreeDB.Exists(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key")), txn, lockingInfo));
+ Assert.IsFalse(btreeDB.Exists(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data")), txn, lockingInfo));
+ txn.Commit();
+
+ btreeDB.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestGetByKey()
+ {
+ testName = "TestGetByKey";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ string btreeDBName =
+ Path.GetFileNameWithoutExtension(btreeDBFileName);
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBName, btreeDBConfig);
+
+ DatabaseEntry key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ DatabaseEntry data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+ btreeDB.Put(key, data);
+
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair =
+ new KeyValuePair<DatabaseEntry, DatabaseEntry>();
+ pair = btreeDB.Get(key);
+ Assert.AreEqual(pair.Key.Data, key.Data);
+ Assert.AreEqual(pair.Value.Data, data.Data);
+ btreeDB.Close();
+ }
+
+ [Test]
+ public void TestGetByRecno()
+ {
+ testName = "TestGetByRecno";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ string btreeDBName =
+ Path.GetFileNameWithoutExtension(btreeDBFileName);
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ btreeDBConfig.UseRecordNumbers = true;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBName, btreeDBConfig);
+ Assert.IsTrue(btreeDB.RecordNumbers);
+
+ DatabaseEntry key = new DatabaseEntry();
+ DatabaseEntry data = new DatabaseEntry();
+ uint recno, count, value;
+ for (recno = 1; recno <= 100; recno++)
+ {
+ value = 200 - recno;
+ Configuration.dbtFromString(key,
+ Convert.ToString(value));
+ Configuration.dbtFromString(data,
+ Convert.ToString(value));
+ btreeDB.Put(key, data);
+ }
+
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair =
+ new KeyValuePair<DatabaseEntry, DatabaseEntry>();
+
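+			/*
+			 * The keys sort as 100..199, so record number count maps to
+			 * key 99 + count; record 101 should not exist.
+			 */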
+ for (count = 1; ; count++)
+ {
+ try
+ {
+ pair = btreeDB.Get(count);
+ }
+ catch (NotFoundException)
+ {
+ Assert.AreEqual(101, count);
+ break;
+ }
+ value = 299 - 200 + count;
+ Assert.AreEqual(value.ToString(),
+ Configuration.strFromDBT(pair.Key));
+ }
+
+ btreeDB.Close();
+ }
+
+ [Test, ExpectedException(typeof(NotFoundException))]
+ public void TestGetBoth()
+ {
+ testName = "TestGetBoth";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ string btreeDBName =
+ Path.GetFileNameWithoutExtension(btreeDBFileName);
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ using (BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBName, btreeDBConfig))
+ {
+ DatabaseEntry key = new DatabaseEntry();
+ DatabaseEntry data = new DatabaseEntry();
+
+ Configuration.dbtFromString(key, "key");
+ Configuration.dbtFromString(data, "data");
+ btreeDB.Put(key, data);
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair =
+ new KeyValuePair<DatabaseEntry, DatabaseEntry>();
+ pair = btreeDB.GetBoth(key, data);
+ Assert.AreEqual(key.Data, pair.Key.Data);
+ Assert.AreEqual(data.Data, pair.Value.Data);
+
+ Configuration.dbtFromString(key, "key");
+ Configuration.dbtFromString(data, "key");
+ btreeDB.GetBoth(key, data);
+ }
+ }
+
+ [Test]
+ public void TestGetBothMultiple()
+ {
+ testName = "TestGetBothMultiple";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ string btreeDBName = testName;
+ DatabaseEntry key, data;
+ KeyValuePair<DatabaseEntry, MultipleDatabaseEntry> kvp;
+ int cnt;
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ btreeDBConfig.Duplicates = DuplicatesPolicy.UNSORTED;
+ btreeDBConfig.PageSize = 1024;
+ using (BTreeDatabase btreeDB = GetMultipleDB(
+ btreeDBFileName, btreeDBName, btreeDBConfig)) {
+ key = new DatabaseEntry(BitConverter.GetBytes(100));
+ data = new DatabaseEntry(BitConverter.GetBytes(100));
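+				// GetMultipleDB stores ten duplicate 100/100 pairs, so both calls should return ten entries.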
+
+ kvp = btreeDB.GetBothMultiple(key, data);
+ cnt = 0;
+ foreach (DatabaseEntry dbt in kvp.Value)
+ cnt++;
+ Assert.AreEqual(cnt, 10);
+
+ kvp = btreeDB.GetBothMultiple(key, data, 1024);
+ cnt = 0;
+ foreach (DatabaseEntry dbt in kvp.Value)
+ cnt++;
+ Assert.AreEqual(cnt, 10);
+ }
+ }
+
+ [Test]
+ public void TestGetMultiple()
+ {
+ testName = "TestGetMultiple";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ string btreeDBName = testName;
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ btreeDBConfig.Duplicates = DuplicatesPolicy.UNSORTED;
+ btreeDBConfig.PageSize = 512;
+
+ using (BTreeDatabase btreeDB = GetMultipleDB(
+ btreeDBFileName, btreeDBName, btreeDBConfig)) {
+ DatabaseEntry key = new DatabaseEntry(
+ BitConverter.GetBytes(10));
+ KeyValuePair<DatabaseEntry, MultipleDatabaseEntry> kvp =
+ btreeDB.GetMultiple(key, 1024);
+ int cnt = 0;
+ foreach (DatabaseEntry dbt in kvp.Value)
+ cnt++;
+ Assert.AreEqual(cnt, 10);
+
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(102));
+ kvp = btreeDB.GetMultiple(key, 1024);
+ cnt = 0;
+ foreach (DatabaseEntry dbt in kvp.Value)
+ cnt++;
+ Assert.AreEqual(cnt, 1);
+ }
+ }
+
+
+ [Test]
+ public void TestGetMultipleByRecno()
+ {
+ testName = "TestGetMultipleByRecno";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ string btreeDBName =
+ Path.GetFileNameWithoutExtension(btreeDBFileName);
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ btreeDBConfig.Duplicates = DuplicatesPolicy.NONE;
+ btreeDBConfig.UseRecordNumbers = true;
+ using (BTreeDatabase btreeDB = GetMultipleDB(
+ btreeDBFileName, btreeDBName, btreeDBConfig)) {
+ int recno = 44;
+ KeyValuePair<DatabaseEntry, MultipleDatabaseEntry> kvp =
+ btreeDB.GetMultiple((uint)recno);
+ int cnt = 0;
+ int kdata = BitConverter.ToInt32(kvp.Key.Data, 0);
+ Assert.AreEqual(kdata, recno);
+ foreach (DatabaseEntry dbt in kvp.Value) {
+ cnt++;
+ int ddata = BitConverter.ToInt32(dbt.Data, 0);
+ Assert.AreEqual(ddata, recno);
+ }
+ Assert.AreEqual(cnt, 1);
+ }
+ }
+
+ [Test]
+ public void TestGetMultipleByRecnoInSize()
+ {
+ testName = "TestGetMultipleByRecnoInSize";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ string btreeDBName =
+ Path.GetFileNameWithoutExtension(btreeDBFileName);
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ btreeDBConfig.Duplicates = DuplicatesPolicy.NONE;
+ btreeDBConfig.UseRecordNumbers = true;
+ btreeDBConfig.PageSize = 512;
+ using (BTreeDatabase btreeDB = GetMultipleDB(
+ btreeDBFileName, btreeDBName, btreeDBConfig)) {
+ int recno = 100;
+ int bufferSize = 1024;
+ KeyValuePair<DatabaseEntry, MultipleDatabaseEntry> kvp =
+ btreeDB.GetMultiple((uint)recno, bufferSize);
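+				// GetMultipleDB stores a single 111 byte data item under key 100.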
+ int cnt = 0;
+ int kdata = BitConverter.ToInt32(kvp.Key.Data, 0);
+ Assert.AreEqual(kdata, recno);
+ foreach (DatabaseEntry dbt in kvp.Value) {
+ cnt++;
+ Assert.AreEqual(dbt.Data.Length, 111);
+ }
+ Assert.AreEqual(1, cnt);
+ }
+ }
+
+ [Test]
+ public void TestGetMultipleInSize()
+ {
+ testName = "TestGetMultipleInSize";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ string btreeDBName =
+ Path.GetFileNameWithoutExtension(btreeDBFileName);
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ btreeDBConfig.Duplicates = DuplicatesPolicy.UNSORTED;
+ btreeDBConfig.PageSize = 1024;
+ using (BTreeDatabase btreeDB = GetMultipleDB(
+ btreeDBFileName, btreeDBName, btreeDBConfig)) {
+
+ int num = 101;
+ DatabaseEntry key = new DatabaseEntry(
+ BitConverter.GetBytes(num));
+ int bufferSize = 10240;
+ KeyValuePair<DatabaseEntry, MultipleDatabaseEntry> kvp =
+ btreeDB.GetMultiple(key, bufferSize);
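+				// GetMultipleDB stores data values 101 through 1023 under key 101, 923 duplicates in all.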
+ int cnt = 0;
+ foreach (DatabaseEntry dbt in kvp.Value) {
+ cnt++;
+ Assert.AreEqual(BitConverter.ToInt32(
+ dbt.Data, 0), num);
+ num++;
+ }
+ Assert.AreEqual(cnt, 923);
+ }
+ }
+
+ [Test]
+ public void TestGetWithTxn()
+ {
+ testName = "TestGetWithTxn";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseLogging = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ try
+ {
+ Transaction openTxn = env.BeginTransaction();
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Env = env;
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase db = BTreeDatabase.Open(
+ testName + ".db", dbConfig, openTxn);
+ openTxn.Commit();
+
+ Transaction putTxn = env.BeginTransaction();
+ try
+ {
+ for (int i = 0; i < 20; i++)
+ db.Put(new DatabaseEntry(
+ BitConverter.GetBytes(i)),
+ new DatabaseEntry(
+ BitConverter.GetBytes(i)), putTxn);
+ putTxn.Commit();
+ }
+ catch (DatabaseException e)
+ {
+ putTxn.Abort();
+ db.Close();
+ throw e;
+ }
+
+ Transaction getTxn = env.BeginTransaction();
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ try
+ {
+ for (int i = 0; i < 20; i++)
+ {
+ pair = db.Get(new DatabaseEntry(
+ BitConverter.GetBytes(i)), getTxn);
+ Assert.AreEqual(BitConverter.GetBytes(i),
+ pair.Key.Data);
+ }
+
+ getTxn.Commit();
+ db.Close();
+ }
+ catch (DatabaseException)
+ {
+ getTxn.Abort();
+ db.Close();
+ throw new TestException();
+ }
+ }
+ catch (DatabaseException)
+ {
+ }
+ finally
+ {
+ env.Close();
+ }
+ }
+
+ [Test]
+ public void TestKeyRange()
+ {
+ testName = "TestKeyRange";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ string btreeDBName = Path.GetFileNameWithoutExtension(
+ btreeDBFileName);
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBName, btreeDBConfig);
+
+ DatabaseEntry key = new DatabaseEntry();
+ DatabaseEntry data = new DatabaseEntry();
+ uint recno;
+ for (recno = 1; recno <= 10; recno++)
+ {
+ Configuration.dbtFromString(key,
+ Convert.ToString(recno));
+ Configuration.dbtFromString(data,
+ Convert.ToString(recno));
+ btreeDB.Put(key, data);
+ }
+
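+			/*
+			 * The keys are stored as strings, so "5" is preceded by
+			 * "1", "10", "2", "3" and "4" in sort order.
+			 */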
+ Configuration.dbtFromString(key, Convert.ToString(5));
+ KeyRange keyRange = btreeDB.KeyRange(key);
+ Assert.AreEqual(0.5, keyRange.Less);
+ Assert.AreEqual(0.1, keyRange.Equal);
+ Assert.AreEqual(0.4, keyRange.Greater);
+
+ btreeDB.Close();
+ }
+
+ [Test]
+ public void TestOpenExistingBtreeDB()
+ {
+ testName = "TestOpenExistingBtreeDB";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeConfig =
+ new BTreeDatabaseConfig();
+ btreeConfig.Creation = CreatePolicy.ALWAYS;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeConfig);
+ btreeDB.Close();
+
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ Database db = Database.Open(btreeDBFileName,
+ dbConfig);
+ Assert.AreEqual(db.Type, DatabaseType.BTREE);
+ Assert.AreEqual(db.Creation, CreatePolicy.NEVER);
+ db.Close();
+ }
+
+ [Test]
+ public void TestOpenNewBtreeDB()
+ {
+ testName = "TestOpenNewBtreeDB";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ BTreeDatabaseConfig btreeConfig =
+ new BTreeDatabaseConfig();
+ BTreeDatabaseConfigTest.Config(xmlElem,
+ ref btreeConfig, true);
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeConfig);
+ Confirm(xmlElem, btreeDB, true);
+ btreeDB.Close();
+ }
+
+ [Test]
+ public void TestOpenMulDBInSingleFile()
+ {
+ testName = "TestOpenMulDBInSingleFile";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ string[] btreeDBArr = new string[4];
+
+ for (int i = 0; i < 4; i++)
+ btreeDBArr[i] = Path.GetFileNameWithoutExtension(
+ btreeDBFileName) + i;
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+
+ BTreeDatabase btreeDB;
+ for (int i = 0; i < 4; i++)
+ {
+ btreeDB = BTreeDatabase.Open(btreeDBFileName,
+ btreeDBArr[i], btreeDBConfig);
+ Assert.AreEqual(CreatePolicy.IF_NEEDED, btreeDB.Creation);
+ btreeDB.Close();
+ }
+
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ Database db;
+ for (int i = 0; i < 4; i++)
+ {
+ using (db = Database.Open(btreeDBFileName,
+ btreeDBArr[i], dbConfig))
+ {
+ Assert.AreEqual(btreeDBArr[i],
+ db.DatabaseName);
+ Assert.AreEqual(DatabaseType.BTREE,
+ db.Type);
+ }
+ }
+ }
+
+ [Test]
+ public void TestOpenWithTxn()
+ {
+ testName = "TestOpenWithTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBName = testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+ Transaction txn = env.BeginTransaction(
+ new TransactionConfig());
+
+ BTreeDatabaseConfig btreeConfig =
+ new BTreeDatabaseConfig();
+ btreeConfig.Creation = CreatePolicy.ALWAYS;
+ btreeConfig.Env = env;
+
+ /*
+			 * If the environment home is set, the file name passed to
+			 * Open() is a path relative to that home.
+ */
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBName, btreeConfig, txn);
+ Assert.IsTrue(btreeDB.Transactional);
+ btreeDB.Close();
+ txn.Commit();
+ env.Close();
+ }
+
+ [Test]
+ public void TestPrefixCompare()
+ {
+ testName = "TestPrefixCompare";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Duplicates = DuplicatesPolicy.SORTED;
+ dbConfig.BTreeCompare =
+ new EntryComparisonDelegate(dbIntCompare);
+ dbConfig.BTreePrefixCompare =
+ new EntryComparisonDelegate(dbIntCompare);
+ BTreeDatabase db = BTreeDatabase.Open(
+ btreeDBFileName, dbConfig);
+
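+			// dbIntCompare returns a - b, so comparing 255 with 257 should be negative.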
+ Assert.Greater(0, db.PrefixCompare(new DatabaseEntry(
+ BitConverter.GetBytes(255)), new DatabaseEntry(
+ BitConverter.GetBytes(257))));
+
+ Assert.AreEqual(0, db.PrefixCompare(new DatabaseEntry(
+ BitConverter.GetBytes(255)), new DatabaseEntry(
+ BitConverter.GetBytes(255))));
+
+ db.Close();
+ }
+
+ [Test]
+ public void TestPutWithoutTxn()
+ {
+ testName = "TestPutWithoutTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ DatabaseEntry key, data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ using (BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBConfig))
+ {
+				// Put an integer key/data pair into the database.
+ key = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ btreeDB.Put(key, data);
+ pair = btreeDB.Get(key);
+ Assert.AreEqual(key.Data, pair.Key.Data);
+ Assert.AreEqual(data.Data, pair.Value.Data);
+ }
+ }
+
+ [Test, ExpectedException(typeof(KeyExistException))]
+ public void TestPutNoOverWrite()
+ {
+ testName = "TestPutNoOverWrite";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ DatabaseEntry key, data, newData;
+ using (BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBConfig))
+ {
+ key = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ newData = new DatabaseEntry(
+ BitConverter.GetBytes((int)1));
+
+ btreeDB.Put(key, data);
+ btreeDB.PutNoOverwrite(key, newData);
+ }
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestPutNoDuplicateWithTxn()
+ {
+ testName = "TestPutNoDuplicateWithTxn";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseLogging = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ Transaction txn = env.BeginTransaction();
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ btreeDBConfig.Env = env;
+ btreeDBConfig.Duplicates = DuplicatesPolicy.SORTED;
+
+ DatabaseEntry key, data;
+ try
+ {
+ using (BTreeDatabase btreeDB =
+ BTreeDatabase.Open(
+ testName + ".db", btreeDBConfig, txn))
+ {
+ key = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+
+ btreeDB.Put(key, data, txn);
+ btreeDB.PutNoDuplicate(key, data, txn);
+ }
+ txn.Commit();
+ }
+ catch (KeyExistException)
+ {
+ txn.Abort();
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ env.Close();
+ }
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestRemoveDBFile()
+ {
+ testName = "TestRemoveDBFile";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ RemoveDBWithoutEnv(testHome, testName, false);
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestRemoveOneDBFromDBFile()
+ {
+ testName = "TestRemoveOneDBFromDBFile";
+ testHome = testFixtureHome + "/" + testName;
+
+ RemoveDBWithoutEnv(testHome, testName, true);
+ }
+
+ public void RemoveDBWithoutEnv(string home, string dbName, bool ifDBName)
+ {
+ string dbFileName = home + "/" + dbName + ".db";
+
+ Configuration.ClearDir(home);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+
+ BTreeDatabase btreeDB;
+ if (ifDBName == false)
+ {
+ btreeDB = BTreeDatabase.Open(dbFileName, btreeDBConfig);
+ btreeDB.Close();
+ BTreeDatabase.Remove(dbFileName);
+ throw new ExpectedTestException();
+ }
+ else
+ {
+ btreeDB = BTreeDatabase.Open(dbFileName, dbName, btreeDBConfig);
+ btreeDB.Close();
+ BTreeDatabase.Remove(dbFileName, dbName);
+ Assert.IsTrue(File.Exists(dbFileName));
+ try
+ {
+ btreeDB = BTreeDatabase.Open(dbFileName, dbName, new BTreeDatabaseConfig());
+ btreeDB.Close();
+ }
+ catch (DatabaseException)
+ {
+ throw new ExpectedTestException();
+ }
+ }
+ }
+
+ [Test]
+ public void TestRemoveDBFromFileInEnv()
+ {
+ testName = "TestRemoveDBFromFileInEnv";
+ testHome = testFixtureHome + "/" + testName;
+
+ RemoveDatabase(testHome, testName, true, true);
+ }
+
+ [Test]
+ public void TestRemoveDBFromEnv()
+ {
+ testName = "TestRemoveDBFromEnv";
+ testHome = testFixtureHome + "/" + testName;
+
+ RemoveDatabase(testHome, testName, true, false);
+ }
+
+ public void RemoveDatabase(string home, string dbName,
+ bool ifEnv, bool ifDBName)
+ {
+ string dbFileName = dbName + ".db";
+
+ Configuration.ClearDir(home);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseCDB = true;
+ envConfig.UseMPool = true;
+
+ DatabaseEnvironment env;
+ env = DatabaseEnvironment.Open(home, envConfig);
+
+ try
+ {
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ btreeDBConfig.Env = env;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ dbFileName, dbName, btreeDBConfig);
+ btreeDB.Close();
+
+ if (ifEnv == true && ifDBName == true)
+ BTreeDatabase.Remove(dbFileName, dbName, env);
+ else if (ifEnv == true)
+ BTreeDatabase.Remove(dbFileName, env);
+ }
+ catch (DatabaseException)
+ {
+ throw new TestException();
+ }
+ finally
+ {
+ try
+ {
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.NEVER;
+ dbConfig.Env = env;
+ BTreeDatabase db = BTreeDatabase.Open(
+ dbFileName, dbName, dbConfig);
+ Assert.AreEqual(db.Creation, CreatePolicy.NEVER);
+ db.Close();
+ throw new TestException();
+ }
+ catch (DatabaseException)
+ {
+ }
+ finally
+ {
+ env.Close();
+ }
+ }
+ }
+
+ [Test]
+ public void TestRenameDB()
+ {
+ testName = "TestRenameDB";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" + testName + ".db";
+ string btreeDBName = testName;
+ string newBtreeDBName = btreeDBName + "1" + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ try
+ {
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBName, btreeDBConfig);
+ btreeDB.Close();
+ BTreeDatabase.Rename(btreeDBFileName,
+ btreeDBName, newBtreeDBName);
+
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.NEVER;
+ BTreeDatabase newDB = BTreeDatabase.Open(
+ btreeDBFileName, newBtreeDBName, dbConfig);
+ newDB.Close();
+ }
+ catch (DatabaseException e)
+ {
+ throw new TestException(e.Message);
+ }
+ finally
+ {
+ try
+ {
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.NEVER;
+ BTreeDatabase db = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBName,
+ dbConfig);
+ throw new TestException(testName);
+ }
+ catch (DatabaseException)
+ {
+ }
+ }
+ }
+
+ [Test]
+ public void TestRenameDBFile()
+ {
+			testName = "TestRenameDBFile";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ string newBtreeDBFileName = testHome + "/" +
+ testName + "1.db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBConfig);
+ btreeDB.Close();
+
+ BTreeDatabase.Rename(btreeDBFileName,
+ newBtreeDBFileName);
+ Assert.IsFalse(File.Exists(btreeDBFileName));
+ Assert.IsTrue(File.Exists(newBtreeDBFileName));
+ }
+
+ [Test]
+ public void TestSync()
+ {
+ testName = "TestSync";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBName = testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.ForceFlush = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+ envConfig.UseLogging = true;
+ envConfig.LogSystemCfg = new LogConfig();
+ envConfig.LogSystemCfg.ForceSync = false;
+ envConfig.LogSystemCfg.AutoRemove = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ TransactionConfig txnConfig = new TransactionConfig();
+ txnConfig.SyncAction =
+ TransactionConfig.LogFlush.WRITE_NOSYNC;
+ Transaction txn = env.BeginTransaction(txnConfig);
+
+ BTreeDatabaseConfig btreeConfig =
+ new BTreeDatabaseConfig();
+ btreeConfig.Creation = CreatePolicy.ALWAYS;
+ btreeConfig.Env = env;
+
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBName, btreeConfig, txn);
+
+ DatabaseEntry key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ DatabaseEntry data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+ Assert.IsFalse(btreeDB.Exists(key, txn));
+ btreeDB.Put(key, data, txn);
+ btreeDB.Sync();
+ btreeDB.Close(false);
+ txn.Commit();
+ env.Close();
+
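+			/*
+			 * The explicit Sync() flushed the put to the database file,
+			 * so the key should be visible when the file is opened
+			 * directly, without the environment.
+			 */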
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.NEVER;
+ using (BTreeDatabase db = BTreeDatabase.Open(
+ testHome + "/" + btreeDBName, dbConfig))
+ {
+ Assert.IsTrue(db.Exists(key));
+ }
+ }
+
+ [Test]
+ public void TestTruncate()
+ {
+ testName = "TestTruncate";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ btreeDBConfig.CacheSize =
+ new CacheInfo(0, 30 * 1024, 1);
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBConfig);
+ DatabaseEntry key;
+ DatabaseEntry data;
+ for (int i = 0; i < 100; i++)
+ {
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ btreeDB.Put(key, data);
+ }
+ uint count = btreeDB.Truncate();
+ Assert.AreEqual(100, count);
+ Assert.IsFalse(btreeDB.Exists(new DatabaseEntry(
+ BitConverter.GetBytes((int)50))));
+ btreeDB.Close();
+ }
+
+ [Test]
+ public void TestTruncateInTxn()
+ {
+ testName = "TestTruncateInTxn";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ try
+ {
+ Transaction openTxn = env.BeginTransaction();
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ BTreeDatabase db = BTreeDatabase.Open(
+ testName + ".db", dbConfig, openTxn);
+ openTxn.Commit();
+
+ Transaction putTxn = env.BeginTransaction();
+ try
+ {
+ DatabaseEntry key, data;
+ for (int i = 0; i < 10; i++)
+ {
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ db.Put(key, data, putTxn);
+ }
+
+ putTxn.Commit();
+ }
+ catch (DatabaseException e)
+ {
+ putTxn.Abort();
+ db.Close();
+ throw e;
+ }
+
+ Transaction trunTxn = env.BeginTransaction();
+ try
+ {
+ uint count = db.Truncate(trunTxn);
+ Assert.AreEqual(10, count);
+ Assert.IsFalse(db.Exists(
+ new DatabaseEntry(
+ BitConverter.GetBytes((int)5)), trunTxn));
+ trunTxn.Commit();
+ db.Close();
+ }
+ catch (DatabaseException)
+ {
+ trunTxn.Abort();
+ db.Close();
+ throw new TestException();
+ }
+ }
+ catch (DatabaseException)
+ {
+ }
+ finally
+ {
+ env.Close();
+ }
+ }
+
+ [Test]
+ public void TestTruncateUnusedPages()
+ {
+ testName = "TestTruncateUnusedPages";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.ALWAYS;
+ dbConfig.PageSize = 512;
+ BTreeDatabase db = BTreeDatabase.Open(
+ dbFileName, dbConfig);
+ DatabaseEntry key, data;
+ for (int i = 0; i < 100; i++)
+ {
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ db.Put(key, data);
+ }
+
+ for (int i = 0; i < 80; i++)
+ db.Delete(new DatabaseEntry(
+ BitConverter.GetBytes(i)));
+
+ uint count = db.TruncateUnusedPages();
+ Assert.LessOrEqual(0, count);
+
+ db.Close();
+ }
+
+ [Test]
+ public void TestTruncateUnusedPagesWithTxn()
+ {
+ testName = "TestTruncateUnusedPagesWithTxn";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ BTreeDatabase db;
+ try
+ {
+ Transaction openTxn = env.BeginTransaction();
+ try
+ {
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ dbConfig.PageSize = 512;
+ db = BTreeDatabase.Open(
+ testName + ".db", dbConfig, openTxn);
+ openTxn.Commit();
+ Assert.AreEqual(512, db.Pagesize);
+ }
+ catch (DatabaseException e)
+ {
+ openTxn.Abort();
+ throw e;
+ }
+
+ Transaction putTxn = env.BeginTransaction();
+ try
+ {
+ DatabaseEntry key, data;
+ for (int i = 0; i < 100; i++)
+ {
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ db.Put(key, data, putTxn);
+ }
+
+ putTxn.Commit();
+ }
+ catch (DatabaseException e)
+ {
+ putTxn.Abort();
+ db.Close();
+ throw e;
+ }
+
+ Transaction delTxn = env.BeginTransaction();
+ try
+ {
+ for (int i = 20; i <= 80; i++)
+ db.Delete(new DatabaseEntry(
+ BitConverter.GetBytes(i)), delTxn);
+ delTxn.Commit();
+ }
+ catch (DatabaseException e)
+ {
+ delTxn.Abort();
+ db.Close();
+ throw e;
+ }
+
+ Transaction trunTxn = env.BeginTransaction();
+ try
+ {
+ uint trunPages = db.TruncateUnusedPages(
+ trunTxn);
+ Assert.LessOrEqual(0, trunPages);
+ trunTxn.Commit();
+ db.Close();
+ }
+ catch (DatabaseException)
+ {
+ trunTxn.Abort();
+ db.Close();
+ throw new Exception();
+ }
+ }
+ catch (DatabaseException)
+ {
+ }
+ finally
+ {
+ env.Close();
+ }
+ }
+
+ [Test]
+ public void TestSalvage()
+ {
+ testName = "TestSalvage";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ string printableOutPut = testHome + "/" +
+ "printableOutPut";
+ string inprintableOutPut = testHome + "/" +
+ "inprintableOutPut";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBConfig);
+
+ DatabaseEntry key;
+ DatabaseEntry data;
+
+ for (uint i = 0; i < 10; i++)
+ {
+ key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes(i.ToString()));
+ data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes(i.ToString()));
+ btreeDB.Put(key, data);
+ }
+
+ btreeDB.Close();
+
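+			// Salvage the database twice, once in printable format and once not.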
+ StreamWriter sw1 = new StreamWriter(printableOutPut);
+ StreamWriter sw2 = new StreamWriter(inprintableOutPut);
+ BTreeDatabase.Salvage(btreeDBFileName, btreeDBConfig,
+ true, true, sw1);
+ BTreeDatabase.Salvage(btreeDBFileName, btreeDBConfig,
+ false, true, sw2);
+ sw1.Close();
+ sw2.Close();
+
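+			/*
+			 * If the two dumps happen to be the same length, their
+			 * contents should still differ somewhere.
+			 */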
+ FileStream file1 = new FileStream(printableOutPut,
+ FileMode.Open);
+ FileStream file2 = new FileStream(inprintableOutPut,
+ FileMode.Open);
+ if (file1.Length == file2.Length)
+ {
+ int filebyte1 = 0;
+ int filebyte2 = 0;
+ do
+ {
+ filebyte1 = file1.ReadByte();
+ filebyte2 = file2.ReadByte();
+ } while ((filebyte1 == filebyte2) &&
+ (filebyte1 != -1));
+ Assert.AreNotEqual(filebyte1, filebyte2);
+ }
+
+ file1.Close();
+ file2.Close();
+ }
+
+ [Test]
+ public void TestUpgrade()
+ {
+ testName = "TestUpgrade";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ string srcDBFileName = "../../bdb4.7.db";
+ string testDBFileName = testHome + "/bdb4.7.db";
+
+ FileInfo srcDBFileInfo = new FileInfo(srcDBFileName);
+
+		// Copy the file.
+ srcDBFileInfo.CopyTo(testDBFileName);
+ Assert.IsTrue(File.Exists(testDBFileName));
+
+ BTreeDatabase.Upgrade(testDBFileName,
+ new DatabaseConfig(), true);
+
+ // Open the upgraded database file.
+ BTreeDatabase db = BTreeDatabase.Open(
+ testDBFileName, new BTreeDatabaseConfig());
+ db.Close();
+ }
+
+ [Test, ExpectedException(typeof(TestException))]
+ public void TestVerify()
+ {
+ testName = "TestVerify";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBFileName = testHome + "/" +
+ testName + ".db";
+ string btreeDBName =
+ Path.GetFileNameWithoutExtension(btreeDBFileName);
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ btreeDBFileName, btreeDBName, btreeDBConfig);
+ btreeDB.Close();
+ btreeDBConfig.Duplicates = DuplicatesPolicy.SORTED;
+ BTreeDatabase.Verify(btreeDBFileName, btreeDBConfig,
+ Database.VerifyOperation.NO_ORDER_CHECK);
+ try
+ {
+ BTreeDatabase.Verify(btreeDBFileName,
+ btreeDBConfig,
+ Database.VerifyOperation.ORDER_CHECK_ONLY);
+ }
+ catch (DatabaseException)
+ {
+ throw new TestException(testName);
+ }
+ finally
+ {
+ BTreeDatabase.Verify(btreeDBFileName,
+ btreeDBName, btreeDBConfig,
+ Database.VerifyOperation.ORDER_CHECK_ONLY);
+ }
+
+ }
+
+ [Test]
+ public void TestStats()
+ {
+ testName = "TestStats";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" +
+ testName + ".db";
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ ConfigCase1(dbConfig);
+ BTreeDatabase db = BTreeDatabase.Open(dbFileName,
+ dbConfig);
+
+ BTreeStats stats = db.Stats();
+ ConfirmStatsPart1Case1(stats);
+
+ // Put 500 records into the database.
+ PutRecordCase1(db, null);
+
+ stats = db.Stats();
+ ConfirmStatsPart2Case1(stats);
+
+ // Delete some data to get some free pages.
+ byte[] bigArray = new byte[10240];
+ db.Delete(new DatabaseEntry(bigArray));
+
+ db.PrintStats();
+ db.PrintFastStats();
+
+ db.Close();
+ }
+
+ [Test]
+ public void TestStatsInTxn()
+ {
+ testName = "TestStatsInTxn";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ StatsInTxn(testHome, testName, false);
+ }
+
+ [Test]
+ public void TestStatsWithIsolation()
+ {
+ testName = "TestStatsWithIsolation";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ StatsInTxn(testHome, testName, true);
+ }
+
+ [Test]
+ public void TestMultipleDBSingleFile()
+ {
+ testName = "TestMultipleDBSingleFile";
+ testHome = testFixtureHome + "/" + testName;
+ string btreeDBName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ string dbName = "test";
+
+ /* Create and initialize database object, open the database. */
+ BTreeDatabaseConfig btreeDBconfig = new BTreeDatabaseConfig();
+ btreeDBconfig.Creation = CreatePolicy.IF_NEEDED;
+ btreeDBconfig.ErrorPrefix = testName;
+ btreeDBconfig.UseRecordNumbers = true;
+
+ BTreeDatabase btreeDB = BTreeDatabase.Open(btreeDBName, dbName,
+ btreeDBconfig);
+ btreeDB.Close();
+ btreeDB = BTreeDatabase.Open(btreeDBName, dbName + "2",
+ btreeDBconfig);
+ btreeDB.Close();
+
+ BTreeDatabaseConfig dbcfg = new BTreeDatabaseConfig();
+ dbcfg.ReadOnly = true;
+ BTreeDatabase newDb = BTreeDatabase.Open(btreeDBName, dbcfg);
+ Boolean val = newDb.HasMultiple;
+ Assert.IsTrue(val);
+ }
+
+ public void StatsInTxn(string home, string name, bool ifIsolation)
+ {
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ EnvConfigCase1(envConfig);
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, envConfig);
+
+ Transaction openTxn = env.BeginTransaction();
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ ConfigCase1(dbConfig);
+ dbConfig.Env = env;
+ BTreeDatabase db = BTreeDatabase.Open(name + ".db",
+ dbConfig, openTxn);
+ openTxn.Commit();
+
+ Transaction statsTxn = env.BeginTransaction();
+ BTreeStats stats;
+ BTreeStats fastStats;
+ if (ifIsolation == false)
+ {
+ stats = db.Stats(statsTxn);
+ fastStats = db.FastStats(statsTxn);
+ }
+ else
+ {
+ stats = db.Stats(statsTxn, Isolation.DEGREE_ONE);
+ fastStats = db.FastStats(statsTxn,
+ Isolation.DEGREE_ONE);
+ }
+ ConfirmStatsPart1Case1(stats);
+
+ // Put 500 records into the database.
+ PutRecordCase1(db, statsTxn);
+
+ if (ifIsolation == false)
+ stats = db.Stats(statsTxn);
+ else
+ stats = db.Stats(statsTxn, Isolation.DEGREE_TWO);
+ ConfirmStatsPart2Case1(stats);
+
+ // Delete some data to get some free pages.
+ byte[] bigArray = new byte[10240];
+ db.Delete(new DatabaseEntry(bigArray), statsTxn);
+ if (ifIsolation == false)
+ stats = db.Stats(statsTxn);
+ else
+ stats = db.Stats(statsTxn, Isolation.DEGREE_THREE);
+ ConfirmStatsPart3Case1(stats);
+
+ db.PrintStats(true);
+ Assert.AreEqual(0, stats.EmptyPages);
+
+ statsTxn.Commit();
+ db.Close();
+ env.Close();
+ }
+
+ public void EnvConfigCase1(DatabaseEnvironmentConfig cfg)
+ {
+ cfg.Create = true;
+ cfg.UseTxns = true;
+ cfg.UseMPool = true;
+ cfg.UseLogging = true;
+ }
+
+ public void ConfigCase1(BTreeDatabaseConfig dbConfig)
+ {
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Duplicates = DuplicatesPolicy.UNSORTED;
+ dbConfig.PageSize = 4096;
+ dbConfig.MinKeysPerPage = 10;
+ }
+
+ public void PutRecordCase1(BTreeDatabase db, Transaction txn)
+ {
+ byte[] bigArray = new byte[10240];
+ for (int i = 0; i < 100; i++)
+ {
+ if (txn == null)
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)));
+ else
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)), txn);
+ }
+ for (int i = 100; i < 500; i++)
+ {
+ if (txn == null)
+ db.Put(new DatabaseEntry(bigArray),
+ new DatabaseEntry(bigArray));
+ else
+ db.Put(new DatabaseEntry(bigArray),
+ new DatabaseEntry(bigArray), txn);
+ }
+ }
+
+ public void ConfirmStatsPart1Case1(BTreeStats stats)
+ {
+ Assert.AreEqual(1, stats.EmptyPages);
+ Assert.AreNotEqual(0, stats.LeafPagesFreeBytes);
+ Assert.AreEqual(1, stats.Levels);
+ Assert.AreNotEqual(0, stats.MagicNumber);
+ Assert.AreEqual(1, stats.MetadataFlags);
+ Assert.AreEqual(10, stats.MinKey);
+ Assert.AreEqual(2, stats.nPages);
+ Assert.AreEqual(4096, stats.PageSize);
+ Assert.AreEqual(9, stats.Version);
+ }
+
+ public void ConfirmStatsPart2Case1(BTreeStats stats)
+ {
+ Assert.AreNotEqual(0, stats.DuplicatePages);
+ Assert.AreNotEqual(0, stats.DuplicatePagesFreeBytes);
+ Assert.AreNotEqual(0, stats.InternalPages);
+ Assert.AreNotEqual(0, stats.InternalPagesFreeBytes);
+ Assert.AreNotEqual(0, stats.LeafPages);
+ Assert.AreEqual(500, stats.nData);
+ Assert.AreEqual(101, stats.nKeys);
+ Assert.AreNotEqual(0, stats.OverflowPages);
+ Assert.AreNotEqual(0, stats.OverflowPagesFreeBytes);
+ }
+
+ public void ConfirmStatsPart3Case1(BTreeStats stats)
+ {
+ Assert.AreNotEqual(0, stats.FreePages);
+ }
+
+ private int dbIntCompare(DatabaseEntry dbt1,
+ DatabaseEntry dbt2)
+ {
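+			// Interpret the first four bytes of each entry as ints and compare numerically.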
+ int a, b;
+ a = BitConverter.ToInt32(dbt1.Data, 0);
+ b = BitConverter.ToInt32(dbt2.Data, 0);
+ return a - b;
+ }
+
+ public void SetUpEnvAndTxn(string home,
+ out DatabaseEnvironment env, out Transaction txn)
+ {
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+ env = DatabaseEnvironment.Open(home, envConfig);
+ txn = env.BeginTransaction();
+ }
+
+ public void OpenBtreeDB(DatabaseEnvironment env,
+ Transaction txn, string dbFileName,
+ out BTreeDatabase db)
+ {
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ if (env != null)
+ {
+ dbConfig.Env = env;
+ dbConfig.NoMMap = false;
+ db = BTreeDatabase.Open(dbFileName, dbConfig, txn);
+ }
+ else
+ {
+ db = BTreeDatabase.Open(dbFileName, dbConfig);
+ }
+ }
+
+ private BTreeDatabase GetMultipleDB(
+ string filename, string dbname, BTreeDatabaseConfig cfg) {
+ BTreeDatabase ret;
+ DatabaseEntry data, key;
+
+ ret = BTreeDatabase.Open(filename, dbname, cfg);
+ key = null;
+ if (cfg.UseRecordNumbers) {
+ /*
+ * Dups aren't allowed with record numbers, so
+ * we have to put different data. Also, record
+ * numbers start at 1, so we do too, which makes
+ * checking results easier.
+ */
+ for (int i = 1; i < 100; i++) {
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ ret.Put(key, data);
+ }
+
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(100));
+ data = new DatabaseEntry();
+ data.Data = new byte[111];
+ for (int i = 0; i < 111; i++)
+ data.Data[i] = (byte)i;
+ ret.Put(key, data);
+ } else {
+ for (int i = 0; i < 100; i++) {
+ if (i % 10 == 0)
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ /* Don't put nulls into the db. */
+ Assert.IsFalse(key == null);
+ Assert.IsFalse(data == null);
+ ret.Put(key, data);
+ }
+
+ if (cfg.Duplicates == DuplicatesPolicy.UNSORTED) {
+ /* Add in duplicates to check GetBothMultiple */
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(100));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes(100));
+ for (int i = 0; i < 10; i++)
+ ret.Put(key, data);
+
+ /*
+ * Add duplicates to check GetMultiple
+ * with given buffer size.
+ */
+ for (int i = 101; i < 1024; i++) {
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(101));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ ret.Put(key, data);
+ }
+
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(102));
+ data = new DatabaseEntry();
+ data.Data = new byte[112];
+ for (int i = 0; i < 112; i++)
+ data.Data[i] = (byte)i;
+ ret.Put(key, data);
+ }
+ }
+ return ret;
+ }
+
+ public static void Confirm(XmlElement xmlElem,
+ BTreeDatabase btreeDB, bool compulsory)
+ {
+ DatabaseTest.Confirm(xmlElem, btreeDB, compulsory);
+ Configuration.ConfirmDuplicatesPolicy(xmlElem,
+ "Duplicates", btreeDB.Duplicates, compulsory);
+ Configuration.ConfirmUint(xmlElem, "MinKeysPerPage",
+ btreeDB.MinKeysPerPage, compulsory);
+ /*
+ * BTreeDatabase.RecordNumbers is the value of
+ * BTreeDatabaseConfig.UseRecordNumbers.
+ */
+ Configuration.ConfirmBool(xmlElem, "UseRecordNumbers",
+ btreeDB.RecordNumbers, compulsory);
+ /*
+ * BTreeDatabase.ReverseSplit is the value of
+ * BTreeDatabaseConfig.NoReverseSplitting.
+ */
+ Configuration.ConfirmBool(xmlElem, "NoReverseSplitting",
+ btreeDB.ReverseSplit, compulsory);
+ Assert.AreEqual(DatabaseType.BTREE, btreeDB.Type);
+ string type = btreeDB.ToString();
+ Assert.IsNotNull(type);
+ }
+ }
+}
+
diff --git a/db-4.8.30/test/scr037/Configuration.cs b/db-4.8.30/test/scr037/Configuration.cs
new file mode 100644
index 0000000..abf0079
--- /dev/null
+++ b/db-4.8.30/test/scr037/Configuration.cs
@@ -0,0 +1,1107 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using BerkeleyDB;
+using NUnit.Framework;
+
+namespace CsharpAPITest
+{
+ public class Configuration
+ {
+		/*
+		 * Configure a value from the test data in the XML and return
+		 * true or false to indicate whether the value was configured.
+		 * If there is no test data and the setting is optional, return
+		 * false. If there is no test data in the XML and the setting
+		 * is compulsory, a ConfigNotFoundException is thrown. If test
+		 * data is provided, the value is set from it and true is
+		 * returned.
+		 */
+ #region Config
+ public static void ConfigAckPolicy(XmlElement xmlElem,
+ string name, ref AckPolicy ackPolicy, bool compulsory)
+ {
+ XmlNode xmlNode = XMLReader.GetNode(xmlElem,
+ name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ string policy = xmlNode.InnerText;
+ if (policy == "ALL")
+ ackPolicy = AckPolicy.ALL;
+ else if (policy == "ALL_PEERS")
+ ackPolicy = AckPolicy.ALL_PEERS;
+ else if (policy == "NONE")
+ ackPolicy = AckPolicy.NONE;
+ else if (policy == "ONE")
+ ackPolicy = AckPolicy.ONE;
+ else if (policy == "ONE_PEER")
+ ackPolicy = AckPolicy.ONE_PEER;
+ else if (policy == "QUORUM")
+ ackPolicy = AckPolicy.QUORUM;
+ else
+ throw new InvalidConfigException(name);
+ }
+ }
+
+ public static bool ConfigBool(XmlElement xmlElem,
+ string name, ref bool value, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ value = bool.Parse(xmlNode.InnerText);
+ return true;
+ }
+
+ public static bool ConfigByteOrder(XmlElement xmlElem,
+ string name, ref ByteOrder byteOrder, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ byteOrder = ByteOrder.FromConst(
+ int.Parse(xmlNode.InnerText));
+ return true;
+ }
+
+ public static bool ConfigByteMatrix(XmlElement xmlElem,
+ string name, ref byte[,] byteMatrix, bool compulsory)
+ {
+ int i, j, matrixLen;
+
+ XmlNode xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ matrixLen = xmlNode.ChildNodes.Count;
+ byte[,] matrix = new byte[matrixLen, matrixLen];
+ for (i = 0; i < matrixLen; i++)
+ {
+ if (xmlNode.ChildNodes[i].ChildNodes.Count != matrixLen)
+ throw new ConfigNotFoundException(name);
+ for (j = 0; j < matrixLen; j++)
+ {
+ matrix[i, j] = byte.Parse(
+ xmlNode.ChildNodes[i].ChildNodes[j].InnerText);
+ }
+ }
+
+ byteMatrix = matrix;
+ return true;
+ }
+
+ public static bool ConfigCacheInfo(XmlElement xmlElem,
+ string name, ref CacheInfo cacheSize, bool compulsory)
+ {
+ XmlNode xmlNode;
+ XmlNode xmlChildNode;
+ uint bytes;
+ uint gigabytes;
+ int nCaches;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ if ((xmlChildNode = XMLReader.GetNode(
+ (XmlElement)xmlNode, "Bytes")) != null)
+ {
+ bytes = uint.Parse(xmlChildNode.InnerText);
+ if ((xmlChildNode = XMLReader.GetNode(
+ (XmlElement)xmlNode, "Gigabytes")) != null)
+ {
+ gigabytes = uint.Parse(xmlChildNode.InnerText);
+ if ((xmlChildNode = XMLReader.GetNode(
+ (XmlElement)xmlNode, "NCaches")) != null)
+ {
+ nCaches = int.Parse(xmlChildNode.InnerText);
+ cacheSize = new CacheInfo(gigabytes,bytes,nCaches);
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+ }
+
+ public static bool ConfigCachePriority(XmlElement xmlElem,
+ string name, ref CachePriority cachePriority, bool compulsory)
+ {
+ XmlNode xmlNode;
+ string priority;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ priority = xmlNode.InnerText;
+ if (priority == "DEFAULT")
+ cachePriority = CachePriority.DEFAULT;
+ else if (priority == "HIGH")
+ cachePriority = CachePriority.HIGH;
+ else if (priority == "LOW")
+ cachePriority = CachePriority.LOW;
+ else if (priority == "VERY_HIGH")
+ cachePriority = CachePriority.VERY_HIGH;
+ else if (priority == "VERY_LOW")
+ cachePriority = CachePriority.VERY_LOW;
+ else
+ throw new InvalidConfigException(name);
+
+ return true;
+ }
+
+ public static bool ConfigCreatePolicy(XmlElement xmlElem,
+ string name, ref CreatePolicy createPolicy, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ if (xmlNode.InnerText == "ALWAYS")
+ createPolicy = CreatePolicy.ALWAYS;
+ else if (xmlNode.InnerText == "IF_NEEDED")
+ createPolicy = CreatePolicy.IF_NEEDED;
+ else if (xmlNode.InnerText == "NEVER")
+ createPolicy = CreatePolicy.NEVER;
+ else
+ throw new InvalidConfigException(name);
+
+ return true;
+ }
+
+ public static bool ConfigDuplicatesPolicy(XmlElement xmlElem,
+ string name, ref DuplicatesPolicy duplicatePolicy,bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ if (xmlNode.InnerText == "NONE")
+ duplicatePolicy = DuplicatesPolicy.NONE;
+ else if (xmlNode.InnerText == "SORTED")
+ duplicatePolicy = DuplicatesPolicy.SORTED;
+ else if (xmlNode.InnerText == "UNSORTED")
+ duplicatePolicy = DuplicatesPolicy.UNSORTED;
+ else
+ throw new InvalidConfigException(name);
+
+ return true;
+ }
+
+ public static bool ConfigDateTime(XmlElement xmlElem,
+ string name, ref DateTime time, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ time = DateTime.Parse(xmlNode.InnerText);
+ return true;
+ }
+
+ public static bool ConfigDeadlockPolicy(XmlElement xmlElem,
+ string name, ref DeadlockPolicy deadlockPolicy, bool compulsory)
+ {
+ XmlNode xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ string policy = xmlNode.InnerText;
+ if (policy == "DEFAULT")
+ deadlockPolicy = DeadlockPolicy.DEFAULT;
+ else if (policy == "EXPIRE")
+ deadlockPolicy = DeadlockPolicy.EXPIRE;
+ else if (policy == "MAX_LOCKS")
+ deadlockPolicy = DeadlockPolicy.MAX_LOCKS;
+ else if (policy == "MAX_WRITE")
+ deadlockPolicy = DeadlockPolicy.MAX_WRITE;
+ else if (policy == "MIN_LOCKS")
+ deadlockPolicy = DeadlockPolicy.MIN_LOCKS;
+ else if (policy == "MIN_WRITE")
+ deadlockPolicy = DeadlockPolicy.MIN_WRITE;
+ else if (policy == "OLDEST")
+ deadlockPolicy = DeadlockPolicy.OLDEST;
+ else if (policy == "RANDOM")
+ deadlockPolicy = DeadlockPolicy.RANDOM;
+ else if (policy == "YOUNGEST")
+ deadlockPolicy = DeadlockPolicy.YOUNGEST;
+ else
+ throw new InvalidConfigException(name);
+ return true;
+ }
+
+ public static bool ConfigEncryption(XmlElement xmlElem,
+ string name, DatabaseConfig dbConfig, bool compulsory)
+ {
+ EncryptionAlgorithm alg;
+ XmlNode xmlNode;
+ string tmp, password;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ password = XMLReader.GetNode((XmlElement)xmlNode,
+ "password").InnerText;
+ tmp = XMLReader.GetNode((XmlElement)xmlNode, "algorithm").InnerText;
+ if (tmp == "AES")
+ alg = EncryptionAlgorithm.AES;
+ else
+ alg = EncryptionAlgorithm.DEFAULT;
+ dbConfig.SetEncryption(password, alg);
+ return true;
+ }
+
+ public static bool ConfigInt(XmlElement xmlElem,
+ string name, ref int value, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ value = int.Parse(xmlNode.InnerText);
+ return true;
+ }
+
+ public static bool ConfigIsolation(XmlElement xmlElem,
+ string name, ref Isolation value, bool compulsory)
+ {
+ XmlNode xmlNode;
+ int isolationDegree;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ isolationDegree = int.Parse(xmlNode.InnerText);
+ if (isolationDegree == 1)
+ value = Isolation.DEGREE_ONE;
+ else if (isolationDegree == 2)
+ value = Isolation.DEGREE_TWO;
+ else if (isolationDegree == 3)
+ value = Isolation.DEGREE_THREE;
+ else
+ throw new InvalidConfigException(name);
+
+ return true;
+ }
+
+ public static bool ConfigLogFlush(XmlElement xmlElem,
+ string name, ref TransactionConfig.LogFlush value,
+ bool compulsory)
+ {
+ XmlNode xmlNode;
+ string logFlush;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ logFlush = xmlNode.InnerText;
+ if (logFlush == "DEFAULT")
+ value = TransactionConfig.LogFlush.DEFAULT;
+ else if (logFlush == "NOSYNC")
+ value = TransactionConfig.LogFlush.NOSYNC;
+ else if (logFlush == "WRITE_NOSYNC")
+ value = TransactionConfig.LogFlush.WRITE_NOSYNC;
+ else if (logFlush == "SYNC")
+ value = TransactionConfig.LogFlush.SYNC;
+ else
+ throw new InvalidConfigException(name);
+
+ return true;
+ }
+
+ public static bool ConfigLong(XmlElement xmlElem,
+ string name, ref long value, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ value = long.Parse(xmlNode.InnerText);
+ return true;
+ }
+
+ public static bool ConfigMaxSequentialWrites(
+ XmlElement xmlElem, string name,
+ MPoolConfig mpoolConfig, bool compulsory)
+ {
+ XmlNode xmlNode;
+ uint pause;
+ int writes;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ pause = uint.Parse(XMLReader.GetNode(
+ (XmlElement)xmlNode, "pause").InnerText);
+ writes = int.Parse(XMLReader.GetNode(
+ (XmlElement)xmlNode,"maxWrites").InnerText);
+ mpoolConfig.SetMaxSequentialWrites(writes, pause);
+ return true;
+ }
+
+ public static bool ConfigReplicationHostAddress(
+ XmlElement xmlElem, string name,
+ ref ReplicationHostAddress address, bool compulsory)
+ {
+ XmlNode xmlNode = XMLReader.GetNode(
+ xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ address.Host = XMLReader.GetNode(
+ (XmlElement)xmlNode, "Host").InnerText;
+ address.Port = uint.Parse(XMLReader.GetNode(
+ (XmlElement)xmlNode, "Port").InnerText);
+ return true;
+ }
+
+ public static bool ConfigString(XmlElement xmlElem,
+ string name, ref string valChar, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ valChar = xmlNode.InnerText;
+ return true;
+ }
+
+ public static bool ConfigStringList(XmlElement xmlElem,
+ string name, ref List<string> strings, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ XmlNodeList list = xmlNode.ChildNodes;
+ for (int i = 0; i < list.Count; i++)
+ strings.Add(list[i].InnerText);
+
+ return true;
+ }
+
+ public static bool ConfigUint(XmlElement xmlElem,
+ string name, ref uint value, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ value = uint.Parse(xmlNode.InnerText);
+ return true;
+ }
+
+ public static bool ConfigVerboseMessages(
+ XmlElement xmlElem, string name,
+ ref VerboseMessages verbose, bool compulsory)
+ {
+ XmlNode xmlNode = XMLReader.GetNode(xmlElem,
+ name);
+ if (xmlNode == null && compulsory == false)
+ return false;
+ else if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+
+ ConfigBool((XmlElement)xmlNode, "AllFileOps",
+ ref verbose.AllFileOps, compulsory);
+ ConfigBool((XmlElement)xmlNode, "Deadlock",
+ ref verbose.Deadlock, compulsory);
+ ConfigBool((XmlElement)xmlNode, "FileOps",
+ ref verbose.FileOps, compulsory);
+ ConfigBool((XmlElement)xmlNode, "Recovery",
+ ref verbose.Recovery, compulsory);
+ ConfigBool((XmlElement)xmlNode, "Register",
+ ref verbose.Register, compulsory);
+ ConfigBool((XmlElement)xmlNode, "Replication",
+ ref verbose.Replication, compulsory);
+ ConfigBool((XmlElement)xmlNode, "ReplicationElection",
+ ref verbose.ReplicationElection, compulsory);
+ ConfigBool((XmlElement)xmlNode, "ReplicationLease",
+ ref verbose.ReplicationLease, compulsory);
+ ConfigBool((XmlElement)xmlNode, "ReplicationMessages",
+ ref verbose.ReplicationMessages, compulsory);
+ ConfigBool((XmlElement)xmlNode, "ReplicationMisc",
+ ref verbose.ReplicationMisc, compulsory);
+ ConfigBool((XmlElement)xmlNode, "ReplicationSync",
+ ref verbose.ReplicationSync, compulsory);
+ ConfigBool((XmlElement)xmlNode, "RepMgrConnectionFailure",
+ ref verbose.RepMgrConnectionFailure, compulsory);
+ ConfigBool((XmlElement)xmlNode, "RepMgrMisc",
+ ref verbose.RepMgrMisc, compulsory);
+ ConfigBool((XmlElement)xmlNode, "WaitsForTable",
+ ref verbose.WaitsForTable, compulsory);
+ return true;
+ }
+
+ #endregion Config
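+
+		/*
+		 * A minimal usage sketch of the Config helpers above; the
+		 * element names "PageSize" and "Creation" are hypothetical
+		 * and xmlElem is assumed to come from TestSetUp():
+		 *
+		 *	uint pageSize = 0;
+		 *	bool creation = false;
+		 *	// Optional: returns false and leaves pageSize
+		 *	// unchanged when the node is missing.
+		 *	Configuration.ConfigUint(xmlElem, "PageSize",
+		 *	    ref pageSize, false);
+		 *	// Compulsory: throws ConfigNotFoundException when
+		 *	// the node is missing.
+		 *	Configuration.ConfigBool(xmlElem, "Creation",
+		 *	    ref creation, true);
+		 */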
+
+		/*
+		 * Confirm that the given value is the same as the one in
+		 * the xml. If there is no testing data in the xml and the
+		 * value is compulsory, a ConfigNotFoundException is thrown.
+		 * If there is no testing data and the value is optional,
+		 * nothing is done. If testing data is provided, the value
+		 * is checked against it.
+		 */
+ #region Confirm
+ public static void ConfirmAckPolicy(XmlElement xmlElem,
+ string name, AckPolicy ackPolicy, bool compulsory)
+ {
+ XmlNode xmlNode = XMLReader.GetNode(xmlElem,
+ name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ string policy = xmlNode.InnerText;
+ if (policy == "ALL")
+ Assert.AreEqual(AckPolicy.ALL,
+ ackPolicy);
+ else if (policy == "ALL_PEERS")
+ Assert.AreEqual(AckPolicy.ALL_PEERS,
+ ackPolicy);
+ else if (policy == "NONE")
+ Assert.AreEqual(AckPolicy.NONE,
+ ackPolicy);
+ else if (policy == "ONE")
+ Assert.AreEqual(AckPolicy.ONE,
+ ackPolicy);
+ else if (policy == "ONE_PEER")
+ Assert.AreEqual(AckPolicy.ONE_PEER,
+ ackPolicy);
+ else if (policy == "QUORUM")
+ Assert.AreEqual(AckPolicy.QUORUM,
+ ackPolicy);
+ else
+ throw new InvalidConfigException(name);
+ }
+ }
+
+ public static void ConfirmBool(XmlElement xmlElem,
+ string name, bool value, bool compulsory)
+ {
+ XmlNode xmlNode;
+ bool expected;
+
+ xmlNode = XMLReader.GetNode(xmlElem,
+ name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ if (xmlNode.ChildNodes.Count > 1)
+ {
+ expected = bool.Parse(
+ xmlNode.FirstChild.NextSibling.InnerText);
+ Assert.AreEqual(expected, value);
+ }
+ }
+ }
+
+		/*
+		 * If MACHINE is configured, the ByteOrder in the database
+		 * is resolved to LITTLE_ENDIAN or BIG_ENDIAN according to
+		 * the current machine.
+		 */
+ public static void ConfirmByteOrder(XmlElement xmlElem,
+ string name, ByteOrder byteOrder, bool compulsory)
+ {
+ XmlNode xmlNode;
+ ByteOrder specOrder;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ specOrder = ByteOrder.FromConst(int.Parse(
+ xmlNode.InnerText));
+ if (specOrder == ByteOrder.MACHINE)
+ Assert.AreNotEqual(specOrder, byteOrder);
+ else
+ Assert.AreEqual(specOrder, byteOrder);
+ }
+ }
+
+ public static void ConfirmByteMatrix(XmlElement xmlElem,
+ string name, byte[,] byteMatrix, bool compulsory)
+ {
+ int i, j, matrixLen;
+
+ XmlNode xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+				/*
+				 * If the lengths of the 2 matrixes are not
+				 * the same, the matrixes are definitely
+				 * not equal.
+				 */
+ matrixLen = xmlNode.ChildNodes.Count;
+ Assert.AreEqual(matrixLen * matrixLen,byteMatrix.Length);
+
+				/*
+				 * Go over every element in the matrix to
+				 * see if it matches the given xml data.
+				 */
+ for (i = 0; i < matrixLen; i++)
+ {
+ if (xmlNode.ChildNodes[i].ChildNodes.Count != matrixLen)
+ throw new ConfigNotFoundException(name);
+ for (j = 0; j < matrixLen; j++)
+ Assert.AreEqual(
+ byte.Parse(xmlNode.ChildNodes[i].ChildNodes[j].InnerText),
+ byteMatrix[i, j]);
+ }
+ }
+ }
+
+ public static void ConfirmCachePriority(XmlElement xmlElem,
+ string name, CachePriority priority, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ if (xmlNode.InnerText == "DEFAULT")
+ Assert.AreEqual(CachePriority.DEFAULT, priority);
+ else if (xmlNode.InnerText == "HIGH")
+ Assert.AreEqual(CachePriority.HIGH, priority);
+ else if (xmlNode.InnerText == "LOW")
+ Assert.AreEqual(CachePriority.LOW, priority);
+ else if (xmlNode.InnerText == "VERY_HIGH")
+ Assert.AreEqual(CachePriority.VERY_HIGH, priority);
+ else if (xmlNode.InnerText == "VERY_LOW")
+ Assert.AreEqual(CachePriority.VERY_LOW, priority);
+ }
+ }
+
+ public static void ConfirmCacheSize(XmlElement xmlElem,
+ string name, CacheInfo cache, bool compulsory)
+ {
+ uint bytes;
+ uint gigabytes;
+ int nCaches;
+ XmlNode xmlNode;
+ XmlNode xmlChildNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ if ((xmlChildNode = XMLReader.GetNode(
+ (XmlElement)xmlNode, "Bytes")) != null)
+ {
+ bytes = uint.Parse(xmlChildNode.InnerText);
+ if ((xmlChildNode = XMLReader.GetNode(
+ (XmlElement)xmlNode, "Gigabytes")) != null)
+ {
+ gigabytes = uint.Parse(xmlChildNode.InnerText);
+ if ((xmlChildNode = XMLReader.GetNode(
+ (XmlElement)xmlNode,
+ "NCaches")) != null)
+ {
+ nCaches = int.Parse(xmlChildNode.InnerText);
+ Assert.LessOrEqual(bytes, cache.Bytes);
+ Assert.AreEqual(gigabytes, cache.Gigabytes);
+ Assert.AreEqual(nCaches, cache.NCaches);
+ }
+ }
+ }
+ }
+ }
+
+		/*
+		 * If the bytes in CacheSize have been assigned, the bytes
+		 * in the cache size should no longer be the default value.
+		 */
+ public static void ConfirmCacheSize(XmlElement xmlElem,
+ string name, CacheInfo cache, uint defaultCache,
+ bool compulsory)
+ {
+ uint bytes;
+ uint gigabytes;
+ int nCaches;
+ XmlNode xmlNode;
+ XmlNode xmlChildNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ if ((xmlChildNode = XMLReader.GetNode(
+ (XmlElement)xmlNode, "Bytes")) != null)
+ {
+ bytes = defaultCache;
+ if ((xmlChildNode = XMLReader.GetNode(
+ (XmlElement)xmlNode, "Gigabytes")) != null)
+ {
+ gigabytes = uint.Parse(xmlChildNode.InnerText);
+ if ((xmlChildNode = XMLReader.GetNode(
+ (XmlElement)xmlNode, "NCaches")) != null)
+ {
+ nCaches = int.Parse(xmlChildNode.InnerText);
+ Assert.AreNotEqual(bytes, cache.Bytes);
+ Assert.AreEqual(gigabytes, cache.Gigabytes);
+ Assert.AreEqual(nCaches, cache.NCaches);
+ }
+ }
+ }
+ }
+ }
+
+ public static void ConfirmCreatePolicy(XmlElement xmlElem,
+ string name, CreatePolicy createPolicy, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ if (xmlNode.InnerText == "ALWAYS")
+ Assert.IsTrue(createPolicy.Equals(CreatePolicy.ALWAYS));
+ else if (xmlNode.InnerText == "IF_NEEDED")
+ Assert.IsTrue(createPolicy.Equals(CreatePolicy.IF_NEEDED));
+ else if (xmlNode.InnerText == "NEVER")
+ Assert.IsTrue(createPolicy.Equals(CreatePolicy.NEVER));
+ }
+ }
+
+ public static void ConfirmDataBaseType(XmlElement xmlElem,
+ string name, DatabaseType dbType, bool compulsory)
+ {
+ XmlNode xmlNode;
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ if (xmlNode.InnerText == "BTREE")
+ Assert.AreEqual(dbType, DatabaseType.BTREE);
+ else if (xmlNode.InnerText == "HASH")
+ Assert.AreEqual(dbType, DatabaseType.HASH);
+ else if (xmlNode.InnerText == "QUEUE")
+ Assert.AreEqual(dbType, DatabaseType.QUEUE);
+ else if (xmlNode.InnerText == "RECNO")
+ Assert.AreEqual(dbType, DatabaseType.RECNO);
+ else if (xmlNode.InnerText == "UNKNOWN")
+ Assert.AreEqual(dbType, DatabaseType.UNKNOWN);
+ }
+ }
+
+ public static void ConfirmDateTime(XmlElement xmlElem,
+ string name, DateTime time, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ Assert.AreEqual(DateTime.Parse(
+ xmlNode.InnerText), time);
+ }
+
+ public static void ConfirmDeadlockPolicy(XmlElement xmlElem,
+ string name, DeadlockPolicy deadlockPolicy, bool compulsory)
+ {
+ XmlNode xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ string policy = xmlNode.InnerText;
+ if (policy == "DEFAULT")
+ Assert.AreEqual(DeadlockPolicy.DEFAULT, deadlockPolicy);
+ else if (policy == "EXPIRE")
+ Assert.AreEqual(DeadlockPolicy.EXPIRE, deadlockPolicy);
+ else if (policy == "MAX_LOCKS")
+ Assert.AreEqual(DeadlockPolicy.MAX_LOCKS, deadlockPolicy);
+ else if (policy == "MAX_WRITE")
+ Assert.AreEqual(DeadlockPolicy.MAX_WRITE, deadlockPolicy);
+ else if (policy == "MIN_LOCKS")
+ Assert.AreEqual(DeadlockPolicy.MIN_LOCKS, deadlockPolicy);
+ else if (policy == "MIN_WRITE")
+ Assert.AreEqual(DeadlockPolicy.MIN_WRITE, deadlockPolicy);
+ else if (policy == "OLDEST")
+ Assert.AreEqual(DeadlockPolicy.OLDEST, deadlockPolicy);
+ else if (policy == "RANDOM")
+ Assert.AreEqual(DeadlockPolicy.RANDOM, deadlockPolicy);
+ else if (policy == "YOUNGEST")
+ Assert.AreEqual(DeadlockPolicy.YOUNGEST, deadlockPolicy);
+ else
+ throw new InvalidConfigException(name);
+ }
+ }
+
+ public static void ConfirmDuplicatesPolicy(
+ XmlElement xmlElem, string name,
+ DuplicatesPolicy duplicatedPolicy, bool compulsory)
+ {
+ XmlNode xmlNode;
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ if (xmlNode.InnerText == "NONE")
+ Assert.AreEqual(duplicatedPolicy, DuplicatesPolicy.NONE);
+ else if (xmlNode.InnerText == "SORTED")
+ Assert.AreEqual(duplicatedPolicy, DuplicatesPolicy.SORTED);
+ else if (xmlNode.InnerText == "UNSORTED")
+ Assert.AreEqual(duplicatedPolicy, DuplicatesPolicy.UNSORTED);
+ }
+ }
+
+ public static void ConfirmEncryption(XmlElement xmlElem,
+ string name, string dPwd, EncryptionAlgorithm dAlg, bool compulsory)
+ {
+ EncryptionAlgorithm alg;
+ XmlNode xmlNode = XMLReader.GetNode(xmlElem,
+ name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ string password = XMLReader.GetNode(
+ (XmlElement)xmlNode, "password").InnerText;
+ string tmp = XMLReader.GetNode(
+ (XmlElement)xmlNode, "algorithm").InnerText;
+ if (tmp == "AES")
+ alg = EncryptionAlgorithm.AES;
+ else
+ alg = EncryptionAlgorithm.DEFAULT;
+ Assert.AreEqual(dAlg, alg);
+				Assert.AreEqual(dPwd, password);
+ }
+ }
+
+ public static void ConfirmInt(XmlElement xmlElem,
+ string name, int value, bool compulsory)
+ {
+ XmlNode xmlNode;
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ Assert.AreEqual(int.Parse(xmlNode.InnerText), value);
+ }
+
+ public static void ConfirmIsolation(XmlElement xmlElem,
+ string name, Isolation value, bool compulsory)
+ {
+ XmlNode xmlNode;
+ int isolationDegree;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ isolationDegree = int.Parse(xmlNode.InnerText);
+ if (isolationDegree == 1)
+ Assert.AreEqual(Isolation.DEGREE_ONE, value);
+ else if (isolationDegree == 2)
+ Assert.AreEqual(Isolation.DEGREE_TWO, value);
+ else if (isolationDegree == 3)
+ Assert.AreEqual(Isolation.DEGREE_THREE, value);
+ else
+ throw new InvalidConfigException(name);
+ }
+ }
+
+ public static void ConfirmLogFlush(XmlElement xmlElem,
+ string name, TransactionConfig.LogFlush value,
+ bool compulsory)
+ {
+ XmlNode xmlNode;
+ string logFlush;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ logFlush = xmlNode.InnerText;
+ if (logFlush == "DEFAULT")
+ Assert.AreEqual(TransactionConfig.LogFlush.DEFAULT, value);
+ else if (logFlush == "NOSYNC")
+ Assert.AreEqual(TransactionConfig.LogFlush.NOSYNC, value);
+ else if (logFlush == "WRITE_NOSYNC")
+ Assert.AreEqual(TransactionConfig.LogFlush.WRITE_NOSYNC, value);
+ else if (logFlush == "SYNC")
+ Assert.AreEqual(TransactionConfig.LogFlush.SYNC, value);
+ else
+ throw new InvalidConfigException(name);
+ }
+ }
+
+ public static void ConfirmLong(XmlElement xmlElem,
+ string name, long value, bool compulsory)
+ {
+ XmlNode xmlNode;
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ Assert.AreEqual(long.Parse(xmlNode.InnerText), value);
+ }
+
+ public static void ConfirmMaxSequentialWrites(
+ XmlElement xmlElem, string name,
+ uint mPause, int mWrites, bool compulsory)
+ {
+ XmlNode xmlNode;
+ uint pause;
+ int writes;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ writes = int.Parse(XMLReader.GetNode(
+ (XmlElement)xmlNode, "maxWrites").InnerText);
+ pause = uint.Parse(XMLReader.GetNode(
+ (XmlElement)xmlNode, "pause").InnerText);
+
+ Assert.AreEqual(mPause, pause);
+ Assert.AreEqual(mWrites, writes);
+ }
+ }
+
+ public static void ConfirmReplicationHostAddress(
+ XmlElement xmlElem, string name,
+ ReplicationHostAddress address, bool compulsory)
+ {
+ XmlNode xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ string host = XMLReader.GetNode(
+ (XmlElement)xmlNode, "Host").InnerText;
+ uint port = uint.Parse(XMLReader.GetNode(
+ (XmlElement)xmlNode, "Port").InnerText);
+
+ Assert.AreEqual(host, address.Host);
+ Assert.AreEqual(port, address.Port);
+ }
+ }
+
+ public static void ConfirmString(XmlElement xmlElem,
+ string name, string str, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ if (str != null)
+ {
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ if (xmlNode.HasChildNodes)
+ Assert.AreEqual(
+ xmlNode.FirstChild.InnerText, str);
+ }
+ }
+ }
+
+ public static void ConfirmStringList(XmlElement xmlElem,
+ string name, List<string> strings, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ if (strings != null)
+ {
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ if (xmlNode.HasChildNodes)
+ {
+ XmlNodeList list = xmlNode.ChildNodes;
+ for (int i = 0; i < xmlNode.ChildNodes.Count;i++)
+ Assert.IsTrue(
+ strings.Contains(list[i].InnerText));
+ }
+ }
+ }
+ }
+
+ public static void ConfirmUint(XmlElement xmlElem,
+ string name, uint value, bool compulsory)
+ {
+ XmlNode xmlNode;
+
+ xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ Assert.AreEqual(uint.Parse(xmlNode.InnerText), value);
+ }
+
+ public static void ConfirmVerboseMessages(
+ XmlElement xmlElem, string name,
+ VerboseMessages verbose, bool compulsory)
+ {
+ XmlNode xmlNode = XMLReader.GetNode(xmlElem, name);
+ if (xmlNode == null && compulsory == true)
+ throw new ConfigNotFoundException(name);
+ else if (xmlNode != null)
+ {
+ ConfirmBool((XmlElement)xmlNode, "AllFileOps",
+ verbose.AllFileOps, compulsory);
+ ConfirmBool((XmlElement)xmlNode, "Deadlock",
+ verbose.Deadlock, compulsory);
+ ConfirmBool((XmlElement)xmlNode, "FileOps",
+ verbose.FileOps, compulsory);
+ ConfirmBool((XmlElement)xmlNode, "Recovery",
+ verbose.Recovery, compulsory);
+ ConfirmBool((XmlElement)xmlNode, "Register",
+ verbose.Register, compulsory);
+ ConfirmBool((XmlElement)xmlNode, "Replication",
+ verbose.Replication, compulsory);
+ ConfirmBool((XmlElement)xmlNode, "ReplicationElection",
+ verbose.ReplicationElection, compulsory);
+ ConfirmBool((XmlElement)xmlNode, "ReplicationLease",
+ verbose.ReplicationLease, compulsory);
+ ConfirmBool((XmlElement)xmlNode, "ReplicationMessages",
+ verbose.ReplicationMessages, compulsory);
+ ConfirmBool((XmlElement)xmlNode, "ReplicationMisc",
+ verbose.ReplicationMisc, compulsory);
+ ConfirmBool((XmlElement)xmlNode, "ReplicationSync",
+ verbose.ReplicationSync, compulsory);
+ ConfirmBool((XmlElement)xmlNode, "RepMgrConnectionFailure",
+ verbose.RepMgrConnectionFailure, compulsory);
+ ConfirmBool((XmlElement)xmlNode, "RepMgrMisc",
+ verbose.RepMgrMisc, compulsory);
+ ConfirmBool((XmlElement)xmlNode,"WaitsForTable",
+ verbose.WaitsForTable, compulsory);
+ }
+ }
+ #endregion Confirm
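+
+		/*
+		 * A matching sketch for the Confirm helpers above, checking
+		 * the value an object actually reports against the same
+		 * hypothetical element name used in the Config sketch:
+		 *
+		 *	Configuration.ConfirmUint(xmlElem, "PageSize",
+		 *	    pageSize, true);
+		 *	// A missing node with compulsory == true throws
+		 *	// ConfigNotFoundException; with compulsory == false
+		 *	// the check is silently skipped.
+		 */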
+
+ public static void dbtFromString(DatabaseEntry dbt, string s)
+ {
+ dbt.Data = System.Text.Encoding.ASCII.GetBytes(s);
+ }
+
+ public static string strFromDBT(DatabaseEntry dbt)
+ {
+
+ System.Text.ASCIIEncoding decode = new ASCIIEncoding();
+ return decode.GetString(dbt.Data);
+ }
+
+		/*
+		 * Read the test parameters for the given fixture and test.
+		 * The retrieved xml fragment is returned; if it cannot be
+		 * found, a ConfigNotFoundException is thrown.
+		 */
+ public static XmlElement TestSetUp(string testFixtureName, string testName)
+ {
+ XMLReader xmlReader = new XMLReader("../../AllTestData.xml");
+ XmlElement xmlElem = xmlReader.GetXmlElement(testFixtureName, testName);
+ if (xmlElem == null)
+ throw new ConfigNotFoundException(testFixtureName + ":" + testName);
+ else
+ return xmlElem;
+ }
+
+ /*
+ * Delete existing test output directory and its files,
+ * then create a new one.
+ */
+ public static void ClearDir(string testDir)
+ {
+ if (Directory.Exists(testDir))
+ Directory.Delete(testDir, true);
+ Directory.CreateDirectory(testDir);
+ }
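+
+		/*
+		 * A sketch of the per-test setup sequence the two helpers
+		 * above are meant to support (the fixture and test names
+		 * here are hypothetical):
+		 *
+		 *	Configuration.ClearDir("./TestOut/SomeFixture/SomeTest");
+		 *	XmlElement xmlElem = Configuration.TestSetUp(
+		 *	    "SomeFixture", "SomeTest");
+		 */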
+ }
+}
diff --git a/db-4.8.30/test/scr037/CursorConfigTest.cs b/db-4.8.30/test/scr037/CursorConfigTest.cs
new file mode 100644
index 0000000..1f586dc
--- /dev/null
+++ b/db-4.8.30/test/scr037/CursorConfigTest.cs
@@ -0,0 +1,81 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class CursorConfigTest
+ {
+ private string testFixtureName;
+ private string testFixtureHome;
+ private string testName;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "CursorConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+ }
+
+ [Test]
+ public void TestConfig()
+ {
+ testName = "TestConfig";
+ /*
+ * Configure the fields/properties and see if
+ * they are updated successfully.
+ */
+ CursorConfig cursorConfig = new CursorConfig();
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ Config(xmlElem, ref cursorConfig, true);
+ Confirm(xmlElem, cursorConfig, true);
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ CursorConfig cursorConfig, bool compulsory)
+ {
+ Configuration.ConfirmIsolation(xmlElement,
+ "IsolationDegree", cursorConfig.IsolationDegree,
+ compulsory);
+ Configuration.ConfirmCachePriority(xmlElement,
+ "Priority", cursorConfig.Priority,
+ compulsory);
+ Configuration.ConfirmBool(xmlElement,
+ "SnapshotIsolation", cursorConfig.SnapshotIsolation,
+ compulsory);
+ Configuration.ConfirmBool(xmlElement,
+ "WriteCursor", cursorConfig.WriteCursor,
+ compulsory);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref CursorConfig cursorConfig, bool compulsory)
+ {
+ Configuration.ConfigIsolation(xmlElement,
+ "IsolationDegree", ref cursorConfig.IsolationDegree,
+ compulsory);
+ Configuration.ConfigCachePriority(xmlElement,
+ "Priority", ref cursorConfig.Priority, compulsory);
+ Configuration.ConfigBool(xmlElement,
+ "SnapshotIsolation", ref cursorConfig.SnapshotIsolation,
+ compulsory);
+ Configuration.ConfigBool(xmlElement,
+ "WriteCursor", ref cursorConfig.WriteCursor,
+ compulsory);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/CursorTest.cs b/db-4.8.30/test/scr037/CursorTest.cs
new file mode 100644
index 0000000..ec12dec
--- /dev/null
+++ b/db-4.8.30/test/scr037/CursorTest.cs
@@ -0,0 +1,1459 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class CursorTest
+ {
+ private string testFixtureName;
+ private string testFixtureHome;
+ private string testName;
+ private string testHome;
+ private DatabaseEnvironment paramEnv;
+ private BTreeDatabase paramDB;
+ private Transaction readTxn;
+ private Transaction updateTxn;
+
+ private EventWaitHandle signal;
+
+ private delegate void CursorMoveFuncDelegate(
+ Cursor cursor, LockingInfo lockingInfo);
+ private CursorMoveFuncDelegate cursorFunc;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "CursorTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+ }
+
+ [Test]
+ public void TestAdd()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestAdd";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Open a database and a cursor.
+ GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out cursor);
+
+ // Add a record and confirm that it exists.
+ AddOneByCursor(db, cursor);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestCompare() {
+ BTreeDatabase db;
+ BTreeCursor dbc1, dbc2;
+ DatabaseEntry data, key;
+
+ testName = "TestCompare";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+			// Open a database and two cursors.
+ GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out dbc1);
+ dbc2 = db.Cursor();
+
+ for (int i = 0; i < 10; i++) {
+ key = new DatabaseEntry(BitConverter.GetBytes(i));
+ data = new DatabaseEntry(BitConverter.GetBytes(i));
+ db.Put(key, data);
+ }
+ key = new DatabaseEntry(BitConverter.GetBytes(5));
+ Assert.IsTrue(dbc1.Move(key, true));
+ Assert.IsTrue(dbc2.Move(key, true));
+ Assert.IsTrue(dbc1.Compare(dbc2));
+ Assert.IsTrue(dbc1.MoveNext());
+ Assert.IsFalse(dbc1.Compare(dbc2));
+ dbc1.Close();
+ dbc2.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestClose()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestClose";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Open a database and a cursor. Then close it.
+ GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out cursor);
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestCount()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestCount";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Write one record into database with cursor.
+ GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out cursor);
+ AddOneByCursor(db, cursor);
+
+			/*
+			 * Confirm that the count operation returns 1 as
+			 * the number of records in the database.
+			 */
+ Assert.AreEqual(1, cursor.Count());
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestCurrent()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestCurrent";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Write a record into database with cursor.
+ GetCursorInBtreeDBWithoutEnv(testHome,
+ testName, out db, out cursor);
+ AddOneByCursor(db, cursor);
+
+			/*
+			 * Confirm that the current record the cursor
+			 * points to is the one just added by the
+			 * cursor.
+			 */
+ Assert.IsTrue(cursor.MoveFirst());
+ Assert.AreEqual(
+ ASCIIEncoding.ASCII.GetBytes("key"),
+ cursor.Current.Key.Data);
+ Assert.AreEqual(
+ ASCIIEncoding.ASCII.GetBytes("data"),
+ cursor.Current.Value.Data);
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestDelete()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestDelete";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Write a record into database with cursor.
+ GetCursorInBtreeDBWithoutEnv(testHome,
+ testName, out db, out cursor);
+ AddOneByCursor(db, cursor);
+
+ // Delete the current record.
+ cursor.Delete();
+
+ // Confirm that the record no longer exists in the db.
+ Assert.AreEqual(0, cursor.Count());
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestDispose()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestDispose";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Write a record into database with cursor.
+ GetCursorInBtreeDBWithoutEnv(testHome,
+ testName, out db, out cursor);
+
+ // Dispose the cursor.
+ cursor.Dispose();
+
+ db.Close();
+ }
+
+ [Test]
+ public void TestIsolationDegree()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ CursorConfig cursorConfig;
+ DatabaseEnvironment env;
+ Transaction txn;
+
+ testName = "TestIsolationDegree";
+ testHome = testFixtureHome + "/" + testName;
+
+ Isolation[] isolationDegrees = new Isolation[3];
+ isolationDegrees[0] = Isolation.DEGREE_ONE;
+ isolationDegrees[1] = Isolation.DEGREE_TWO;
+ isolationDegrees[2] = Isolation.DEGREE_THREE;
+
+ IsolationDelegate[] delegates = {
+ new IsolationDelegate(CursorReadUncommited),
+ new IsolationDelegate(CursorReadCommited),
+ new IsolationDelegate(CursorRead)};
+
+ cursorConfig = new CursorConfig();
+ for (int i = 0; i < 3; i++)
+ {
+ cursorConfig.IsolationDegree = isolationDegrees[i];
+ GetCursorInBtreeDBInTDS(testHome + "/" + i.ToString(),
+ testName, cursorConfig, out env, out db,
+ out cursor, out txn);
+ cursor.Close();
+ db.Close();
+ txn.Commit();
+ env.Close();
+ }
+ }
+
+ public delegate void IsolationDelegate(
+ DatabaseEnvironment env, BTreeDatabase db,
+ Cursor cursor, Transaction txn);
+
+ /*
+ * Configure a transactional cursor to have degree 2
+ * isolation, which permits data read by this cursor
+ * to be deleted prior to the commit of the transaction
+ * for this cursor.
+ */
+ public void CursorReadCommited(
+ DatabaseEnvironment env, BTreeDatabase db,
+ Cursor cursor, Transaction txn)
+ {
+ Console.WriteLine("CursorReadCommited");
+ }
+
+ /*
+ * Configure a transactional cursor to have degree 1
+ * isolation. The cursor's read operations could return
+		 * modified but not yet committed data.
+ */
+ public void CursorReadUncommited(
+ DatabaseEnvironment env, BTreeDatabase db,
+ Cursor cursor, Transaction txn)
+ {
+ Console.WriteLine("CursorReadUncommited");
+ }
+
+ /*
+ * Only return committed data.
+ */
+ public void CursorRead(
+ DatabaseEnvironment env, BTreeDatabase db,
+ Cursor cursor, Transaction txn)
+ {
+ Console.WriteLine("CursorRead");
+ }
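+
+		/*
+		 * A minimal sketch of how one of the degrees above is
+		 * applied to a cursor (the db is assumed to be opened with
+		 * ReadUncommitted when degree 1 is wanted):
+		 *
+		 *	CursorConfig cfg = new CursorConfig();
+		 *	cfg.IsolationDegree = Isolation.DEGREE_TWO;
+		 *	BTreeCursor c = db.Cursor(cfg, txn);
+		 */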
+
+
+ [Test]
+ public void TestMoveToExactKey()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestMoveToExactKey";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ GetCursorInBtreeDBWithoutEnv(testHome,
+ testName, out db, out cursor);
+
+ // Add one record into database.
+ DatabaseEntry key = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ DatabaseEntry data = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair =
+ new KeyValuePair<DatabaseEntry,DatabaseEntry>(key, data);
+ cursor.Add(pair);
+
+ // Move the cursor exactly to the specified key.
+ MoveCursor(cursor, false);
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveToExactPair()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ DatabaseEntry key, data;
+
+ testName = "TestMoveToExactPair";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ GetCursorInBtreeDBWithoutEnv(testHome,
+ testName, out db, out cursor);
+
+ // Add one record into database.
+ key = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair =
+ new KeyValuePair<DatabaseEntry, DatabaseEntry>(key, data);
+ cursor.Add(pair);
+
+ // Move the cursor exactly to the specified key/data pair.
+ MoveCursor(cursor, true);
+ cursor.Close();
+ db.Close();
+
+ }
+
+ [Test]
+ public void TestMoveWithRMW()
+ {
+ testName = "TestMoveWithRMW";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ // Use MoveCursor() as its move function.
+ cursorFunc = new CursorMoveFuncDelegate(MoveCursor);
+
+ // Move to a specified key and a key/data pair.
+ MoveWithRMW(testHome, testName);
+ }
+
+ [Test]
+ public void TestMoveFirst()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestMoveFirst";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out cursor);
+ AddOneByCursor(db, cursor);
+
+ // Move to the first record.
+ MoveCursorToFirst(cursor, null);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveFirstWithRMW()
+ {
+ testName = "TestMoveFirstWithRMW";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ // Use MoveCursorToFirst() as its move function.
+ cursorFunc = new CursorMoveFuncDelegate(MoveCursorToFirst);
+
+			// Read the first record with a write lock.
+ MoveWithRMW(testHome, testName);
+ }
+
+ [Test]
+ public void TestMoveLast()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestMoveLast";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out cursor);
+ AddOneByCursor(db, cursor);
+
+ // Move the cursor to the last record.
+ MoveCursorToLast(cursor, null);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveLastWithRMW()
+ {
+ testName = "TestMoveLastWithRMW";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ // Use MoveCursorToLast() as its move function.
+ cursorFunc = new CursorMoveFuncDelegate(MoveCursorToLast);
+
+			// Read the last record with a write lock.
+ MoveWithRMW(testHome, testName);
+ }
+
+ [Test]
+ public void TestMoveNext()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+			testName = "TestMoveNext";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out cursor);
+
+ // Put ten records to the database.
+ for (int i = 0; i < 10; i++)
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)));
+
+ // Move the cursor from the first record to the fifth.
+ MoveCursorToNext(cursor, null);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveNextWithRMW()
+ {
+			testName = "TestMoveNextWithRMW";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ // Use MoveCursorToNext() as its move function.
+ cursorFunc = new CursorMoveFuncDelegate(
+ MoveCursorToNext);
+
+			/*
+			 * Read from the first record to the fifth record
+			 * with a write lock.
+			 */
+ MoveWithRMW(testHome, testName);
+ }
+
+ [Test]
+ public void TestMoveNextDuplicate()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestMoveNextDuplicate";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out cursor);
+
+ // Add ten duplicate records to the database.
+ for (int i = 0; i < 10; i++)
+ db.Put(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(
+ BitConverter.GetBytes(i)));
+
+ /*
+ * Move the cursor from one duplicate record to
+ * another duplicate one.
+ */
+ MoveCursorToNextDuplicate(cursor, null);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveNextDuplicateWithRMW()
+ {
+ testName = "TestMoveNextDuplicateWithRMW";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Use MoveCursorToNextDuplicate() as its
+ * move function.
+ */
+ cursorFunc = new CursorMoveFuncDelegate(
+ MoveCursorToNextDuplicate);
+
+			/*
+			 * Move to the next duplicate record with a
+			 * write lock.
+			 */
+ MoveWithRMW(testHome, testName);
+ }
+
+
+ [Test]
+ public void TestMoveNextUnique()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestMoveNextUnique";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out cursor);
+
+ // Add ten different records to the database.
+ for (int i = 0; i < 10; i++)
+ db.Put(new DatabaseEntry(
+ BitConverter.GetBytes(i)),
+ new DatabaseEntry(
+ BitConverter.GetBytes(i)));
+
+ // Move to five unique records.
+ MoveCursorToNextUnique(cursor, null);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveNextUniqueWithRMW()
+ {
+ testName = "TestMoveNextUniqueWithRMW";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Use MoveCursorToNextUnique() as its
+ * move function.
+ */
+ cursorFunc = new CursorMoveFuncDelegate(
+ MoveCursorToNextUnique);
+
+ /*
+ * Move to five unique records.
+ */
+ MoveWithRMW(testHome, testName);
+ }
+
+ [Test]
+ public void TestMovePrev()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestMovePrev";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out cursor);
+
+ // Add ten records to the database.
+ for (int i = 0; i < 10; i++)
+ AddOneByCursor(db, cursor);
+
+ // Move the cursor to previous five records
+ MoveCursorToPrev(cursor, null);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMovePrevWithRMW()
+ {
+ testName = "TestMovePrevWithRMW";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+			/*
+			 * Use MoveCursorToPrev() as its
+			 * move function.
+			 */
+ cursorFunc = new CursorMoveFuncDelegate(
+ MoveCursorToPrev);
+
+			// Read the previous record with a write lock.
+ MoveWithRMW(testHome, testName);
+ }
+
+
+ [Test]
+ public void TestMovePrevDuplicate()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestMovePrevDuplicate";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out cursor);
+
+ // Add ten records to the database.
+ for (int i = 0; i < 10; i++)
+ AddOneByCursor(db, cursor);
+
+ // Move the cursor to previous duplicate records.
+ MoveCursorToPrevDuplicate(cursor, null);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMovePrevDuplicateWithRMW()
+ {
+ testName = "TestMovePrevDuplicateWithRMW";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+			/*
+			 * Use MoveCursorToPrevDuplicate() as its
+			 * move function.
+			 */
+ cursorFunc = new CursorMoveFuncDelegate(
+ MoveCursorToPrevDuplicate);
+
+			// Read the previous duplicate record with a write lock.
+ MoveWithRMW(testHome, testName);
+ }
+
+
+ [Test]
+ public void TestMovePrevUnique()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestMovePrevUnique";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out cursor);
+
+ // Add ten records to the database.
+ for (int i = 0; i < 10; i++)
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)));
+
+ // Move the cursor to previous unique records.
+ MoveCursorToPrevUnique(cursor, null);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMovePrevUniqueWithRMW()
+ {
+			testName = "TestMovePrevUniqueWithRMW";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Use MoveCursorToPrevUnique() as its
+ * move function.
+ */
+ cursorFunc = new CursorMoveFuncDelegate(
+ MoveCursorToPrevUnique);
+
+			// Read the previous unique record with a write lock.
+ MoveWithRMW(testHome, testName);
+ }
+
+ [Test]
+ public void TestPriority()
+ {
+ BTreeCursor cursor;
+ BTreeDatabase db;
+ CachePriority[] priorities;
+ CursorConfig cursorConfig;
+ DatabaseEnvironment env;
+
+ cursorConfig = new CursorConfig();
+
+ priorities = new CachePriority[5];
+ priorities[0] = CachePriority.DEFAULT;
+ priorities[1] = CachePriority.HIGH;
+ priorities[2] = CachePriority.LOW;
+ priorities[3] = CachePriority.VERY_HIGH;
+ priorities[4] = CachePriority.VERY_LOW;
+
+ testName = "TestPriority";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ for (int i = 0; i < 5; i++)
+ {
+ // Configure the cursor priority.
+ cursorConfig.Priority = priorities[i];
+
+ // Open a database to test a specified priority.
+ GetCursorInBtreeDBInCDS(testHome, testName,
+ cursorConfig, out env, out db, out cursor);
+ Assert.AreEqual(priorities[i], cursorConfig.Priority);
+ cursor.Close();
+ db.Close();
+ env.Close();
+ }
+ }
+
+ [Test]
+ public void TestRefresh()
+ {
+ BTreeDatabase db;
+ BTreeCursor cursor;
+
+ testName = "TestRefresh";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ GetCursorInBtreeDBWithoutEnv(testHome, testName,
+ out db, out cursor);
+
+ // Write a record with cursor and Refresh the cursor.
+ MoveCursorToCurrentRec(cursor, null);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestRefreshWithRMW()
+ {
+ testName = "TestRefreshWithRMW";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ cursorFunc = new CursorMoveFuncDelegate(
+ MoveCursorToCurrentRec);
+
+			// Refresh the current record with a write lock.
+ MoveWithRMW(testHome, testName);
+ }
+
+ [Test]
+ public void TestSnapshotIsolation()
+ {
+ BTreeDatabaseConfig dbConfig;
+ DatabaseEntry key, data;
+ DatabaseEnvironmentConfig envConfig;
+ Thread readThread, updateThread;
+ Transaction txn;
+
+ updateTxn = null;
+ readTxn = null;
+ paramDB = null;
+ paramEnv = null;
+ testName = "TestSnapshotIsolation";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Open environment with DB_MULTIVERSION
+ * which is required by DB_TXN_SNAPSHOT.
+ */
+ envConfig = new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMVCC = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+ envConfig.UseLocking = true;
+ envConfig.TxnTimeout = 1000;
+ paramEnv = DatabaseEnvironment.Open(
+ testHome, envConfig);
+ paramEnv.DetectDeadlocks(DeadlockPolicy.YOUNGEST);
+
+			/*
+			 * Open a transactional database and put 256 records
+			 * into it within a transaction.
+			 */
+ txn = paramEnv.BeginTransaction();
+ dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.UseMVCC = true;
+ dbConfig.Env = paramEnv;
+ paramDB = BTreeDatabase.Open(
+ testName + ".db", dbConfig, txn);
+ for (int i = 0; i < 256; i++)
+ {
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ paramDB.Put(key, data, txn);
+ }
+ txn.Commit();
+
+			/*
+			 * Begin two threads, a read thread and an update
+			 * thread. The update thread runs an update
+			 * transaction using full read/write locking. The
+			 * read thread sets DB_TXN_SNAPSHOT on a read-only
+			 * cursor.
+			 */
+ readThread = new Thread(new ThreadStart(ReadTxn));
+ updateThread = new Thread(new ThreadStart(UpdateTxn));
+ updateThread.Start();
+ Thread.Sleep(1000);
+ readThread.Start();
+ readThread.Join();
+ updateThread.Join();
+
+			// Commit the transactions in both threads.
+ if (updateTxn != null)
+ updateTxn.Commit();
+ if (readTxn != null)
+ readTxn.Commit();
+
+ /*
+ * Confirm that the overwrite operation works.
+ */
+ ConfirmOverwrite();
+
+ paramDB.Close();
+ paramEnv.Close();
+ }
+
+ public void ReadTxn()
+ {
+ // Get a new transaction for reading the db.
+ TransactionConfig txnConfig =
+ new TransactionConfig();
+ txnConfig.Snapshot = true;
+ readTxn = paramEnv.BeginTransaction(
+ txnConfig);
+
+			// Get a new read-only cursor on the db.
+ CursorConfig cursorConfig = new CursorConfig();
+ cursorConfig.WriteCursor = false;
+ BTreeCursor cursor = paramDB.Cursor(
+ cursorConfig, readTxn);
+
+			// Continually read records from the db.
+ try
+ {
+ Assert.IsTrue(cursor.MoveFirst());
+ int i = 0;
+ do
+ {
+ Assert.AreEqual(
+ BitConverter.ToInt32(
+ cursor.Current.Key.Data, 0),
+ BitConverter.ToInt32(
+ cursor.Current.Value.Data, 0));
+ } while (i <= 1000 && cursor.MoveNext());
+ }
+ catch (DeadlockException)
+ {
+ }
+ finally
+ {
+ cursor.Close();
+ }
+ }
+
+ public void UpdateTxn()
+ {
+ int int32Value;
+ DatabaseEntry data;
+
+ // Get a new transaction for updating the db.
+ TransactionConfig txnConfig =
+ new TransactionConfig();
+ txnConfig.IsolationDegree =
+ Isolation.DEGREE_THREE;
+
+ updateTxn =
+ paramEnv.BeginTransaction(txnConfig);
+
+			// Continually overwrite records in the db.
+
+ BTreeCursor cursor =
+ paramDB.Cursor(updateTxn);
+
+ // Move the cursor to the first record.
+ Assert.IsTrue(cursor.MoveFirst());
+ int i = 0;
+ try
+ {
+ do
+ {
+
+ int32Value = BitConverter.ToInt32(
+ cursor.Current.Value.Data, 0);
+ data = new DatabaseEntry(
+ BitConverter.GetBytes(int32Value - 1));
+ cursor.Overwrite(data);
+ } while (i <= 1000 && cursor.MoveNext());
+ }
+ catch (DeadlockException)
+ {
+ }
+ finally
+ {
+ cursor.Close();
+ }
+ }
+
+
+ [Test]
+ public void TestWriteCursor()
+ {
+ BTreeCursor cursor;
+ BTreeDatabase db;
+ CursorConfig cursorConfig;
+ DatabaseEnvironment env;
+
+ testName = "TestWriteCursor";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ cursorConfig = new CursorConfig();
+ cursorConfig.WriteCursor = true;
+
+ GetCursorInBtreeDBInCDS(testHome, testName,
+ cursorConfig, out env, out db, out cursor);
+
+			/*
+			 * Add a record to the database by cursor. If the
+			 * WriteCursor doesn't work, an exception will be
+			 * thrown in the environment, which is configured
+			 * with DB_INIT_CDB.
+			 */
+ try
+ {
+ AddOneByCursor(db, cursor);
+ }
+ catch (DatabaseException)
+ {
+ throw new TestException();
+ }
+ finally
+ {
+ cursor.Close();
+ db.Close();
+ env.Close();
+ }
+ }
+
+ public void ConfirmOverwrite()
+ {
+ Transaction confirmTxn = paramEnv.BeginTransaction();
+ BTreeCursor cursor = paramDB.Cursor(confirmTxn);
+
+ int i = 0;
+ Assert.IsTrue(cursor.MoveFirst());
+ do
+ {
+ Assert.AreNotEqual(
+ cursor.Current.Key.Data,
+ cursor.Current.Value.Data);
+ } while (i <= 1000 && cursor.MoveNext());
+
+ cursor.Close();
+ confirmTxn.Commit();
+ }
+
+ public static void AddOneByCursor(Database db, Cursor cursor)
+ {
+ DatabaseEntry key, data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+
+ // Add a record to db via cursor.
+ key = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key"));
+ data = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data"));
+ pair = new KeyValuePair<DatabaseEntry,DatabaseEntry>(key, data);
+ cursor.Add(pair);
+
+ // Confirm that the record has been put to the database.
+ Assert.IsTrue(db.Exists(key));
+ }
+
+
+ public static void GetCursorInBtreeDBWithoutEnv(
+ string home, string name, out BTreeDatabase db,
+ out BTreeCursor cursor)
+ {
+ string dbFileName = home + "/" + name + ".db";
+
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Duplicates = DuplicatesPolicy.UNSORTED;
+ db = BTreeDatabase.Open(dbFileName, dbConfig);
+ cursor = db.Cursor();
+ }
+
+ public static void GetCursorInBtreeDBInTDS(
+ string home, string name,
+ CursorConfig cursorConfig,
+ out DatabaseEnvironment env, out BTreeDatabase db,
+ out BTreeCursor cursor, out Transaction txn)
+ {
+ string dbFileName = name + ".db";
+
+ Configuration.ClearDir(home);
+
+ // Open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ envConfig.NoMMap = false;
+ envConfig.UseLocking = true;
+ env = DatabaseEnvironment.Open(home, envConfig);
+
+ // Begin a transaction.
+ txn = env.BeginTransaction();
+
+			/*
+			 * Open a btree database. The underlying database
+			 * should be opened with ReadUncommitted if the
+			 * cursor's isolation degree will be set to 1.
+			 */
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ if (cursorConfig.IsolationDegree == Isolation.DEGREE_ONE)
+ dbConfig.ReadUncommitted = true;
+
+ db = BTreeDatabase.Open(dbFileName, dbConfig, txn);
+
+ // Get a cursor in the transaction.
+ cursor = db.Cursor(cursorConfig, txn);
+ }
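+
+ /*
+ * A hedged usage sketch (not an NUnit test) for the helper above:
+ * requesting a degree-1 (read-uncommitted) cursor, which is why
+ * the helper opens the underlying database with ReadUncommitted.
+ * The home path and method name are hypothetical.
+ */
+ public void SketchDegreeOneCursor()
+ {
+ DatabaseEnvironment env;
+ BTreeDatabase db;
+ BTreeCursor cursor;
+ Transaction txn;
+
+ CursorConfig cfg = new CursorConfig();
+ cfg.IsolationDegree = Isolation.DEGREE_ONE;
+ GetCursorInBtreeDBInTDS("./TestOut/SketchTDS", "SketchTDS",
+ cfg, out env, out db, out cursor, out txn);
+
+ // Reads through this cursor may see uncommitted data.
+ cursor.MoveFirst();
+
+ cursor.Close();
+ txn.Commit();
+ db.Close();
+ env.Close();
+ }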
+
+ // Get a cursor in CDS.
+ public static void GetCursorInBtreeDBInCDS(
+ string home, string name,
+ CursorConfig cursorConfig,
+ out DatabaseEnvironment env, out BTreeDatabase db,
+ out BTreeCursor cursor)
+ {
+ string dbFileName = name + ".db";
+
+ // Open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseCDB = true;
+ envConfig.UseMPool = true;
+ env = DatabaseEnvironment.Open(home, envConfig);
+
+ /*
+ * Open a btree database. The underlying database
+ * must be opened with ReadUncommitted if the
+ * cursor's isolation degree will be set to 1.
+ */
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+
+ if (cursorConfig.IsolationDegree == Isolation.DEGREE_ONE)
+ dbConfig.ReadUncommitted = true;
+
+ db = BTreeDatabase.Open(dbFileName, dbConfig);
+
+ // Get a cursor in the CDS environment.
+ cursor = db.Cursor(cursorConfig);
+ }
+
+ public void RdMfWt()
+ {
+ Transaction txn = paramEnv.BeginTransaction();
+ Cursor dbc = paramDB.Cursor(txn);
+
+ try
+ {
+ LockingInfo lck = new LockingInfo();
+ lck.ReadModifyWrite = true;
+
+ // Read record.
+ cursorFunc(dbc, lck);
+
+ // Block the current thread until event is set.
+ signal.WaitOne();
+
+ // Write a new record into the database.
+ DatabaseEntry key = new DatabaseEntry(
+ BitConverter.GetBytes(55));
+ DatabaseEntry data = new DatabaseEntry(
+ BitConverter.GetBytes(55));
+ dbc.Add(new KeyValuePair<DatabaseEntry,
+ DatabaseEntry>(key, data));
+
+ dbc.Close();
+ txn.Commit();
+ }
+ catch (DeadlockException)
+ {
+ dbc.Close();
+ txn.Abort();
+ }
+ }
+
+
+ public void MoveWithRMW(string home, string name)
+ {
+ paramEnv = null;
+ paramDB = null;
+
+ // Open the environment.
+ DatabaseEnvironmentConfig envCfg =
+ new DatabaseEnvironmentConfig();
+ envCfg.Create = true;
+ envCfg.FreeThreaded = true;
+ envCfg.UseLocking = true;
+ envCfg.UseLogging = true;
+ envCfg.UseMPool = true;
+ envCfg.UseTxns = true;
+ paramEnv = DatabaseEnvironment.Open(home, envCfg);
+
+ // Open the database in a transaction.
+ Transaction openTxn = paramEnv.BeginTransaction();
+ BTreeDatabaseConfig cfg = new BTreeDatabaseConfig();
+ cfg.Creation = CreatePolicy.ALWAYS;
+ cfg.Env = paramEnv;
+ cfg.FreeThreaded = true;
+ cfg.PageSize = 4096;
+ cfg.Duplicates = DuplicatesPolicy.UNSORTED;
+ paramDB = BTreeDatabase.Open(name + ".db", cfg, openTxn);
+ openTxn.Commit();
+
+ /*
+ * Put 10 unique records, 2 duplicate records and one
+ * more unique record into the database.
+ */
+ Transaction txn = paramEnv.BeginTransaction();
+ for (int i = 0; i < 13; i++)
+ {
+ DatabaseEntry key, data;
+ if (i == 10 || i == 11)
+ {
+ key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+ }
+ else
+ {
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ }
+ paramDB.Put(key, data, txn);
+ }
+
+ txn.Commit();
+
+ // Create an event wait handle.
+ signal = new EventWaitHandle(false, EventResetMode.ManualReset);
+
+ /*
+ * Start RdMfWt() in two threads. RdMfWt() reads and then
+ * writes data in the database.
+ */
+ Thread t1 = new Thread(new ThreadStart(RdMfWt));
+ Thread t2 = new Thread(new ThreadStart(RdMfWt));
+ t1.Start();
+ t2.Start();
+
+ /*
+ * Give both threads time to read before signalling
+ * them to write.
+ */
+ Thread.Sleep(1000);
+
+ // Invoke the write operation in both threads.
+ signal.Set();
+
+ // Check the number of deadlocks while the threads run.
+ while (t1.IsAlive || t2.IsAlive)
+ {
+ /*
+ * Give both threads time to write before
+ * counting the number of deadlocks.
+ */
+ Thread.Sleep(1000);
+ uint deadlocks = paramEnv.DetectDeadlocks(
+ DeadlockPolicy.DEFAULT);
+
+ // Confirm that no deadlocks have occurred.
+ Assert.AreEqual(0, deadlocks);
+ }
+
+ t1.Join();
+ t2.Join();
+ paramDB.Close();
+ paramEnv.Close();
+ }
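+
+ /*
+ * A minimal sketch (not an NUnit test) of the read-modify-write
+ * pattern exercised above: reading with LockingInfo.ReadModifyWrite
+ * takes a write lock up front, so the later update of the same
+ * record needs no lock upgrade and should not deadlock against a
+ * second thread doing the same thing. It assumes a non-empty
+ * database; the method name is hypothetical.
+ */
+ public void SketchReadModifyWrite(BTreeDatabase db,
+ DatabaseEnvironment env)
+ {
+ Transaction txn = env.BeginTransaction();
+ BTreeCursor dbc = db.Cursor(txn);
+
+ LockingInfo lck = new LockingInfo();
+ lck.ReadModifyWrite = true;
+
+ // Read the first record under a write (RMW) lock.
+ dbc.MoveFirst(lck);
+
+ // Update the same record; the write lock is already held.
+ dbc.Overwrite(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("new data")));
+
+ dbc.Close();
+ txn.Commit();
+ }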
+
+ /*
+ * Move the cursor to an existing key or key/data pair.
+ */
+ public void MoveCursor(Cursor dbc, bool ifPair)
+ {
+ DatabaseEntry key, data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+
+ key = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ if (ifPair == false)
+ Assert.IsTrue(dbc.Move(key, true));
+ else
+ {
+ data = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ pair = new KeyValuePair<DatabaseEntry,
+ DatabaseEntry>(key, data);
+ Assert.IsTrue(dbc.Move(pair, true));
+ }
+ }
+
+ /*
+ * Move the cursor to an existing key and key/data
+ * pair with LockingInfo.
+ */
+ public void MoveCursor(Cursor dbc, LockingInfo lck)
+ {
+ DatabaseEntry key, data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+
+ key = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ data = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ pair = new KeyValuePair<DatabaseEntry,
+ DatabaseEntry>(key, data);
+
+ // Move to an existing key.
+ Assert.IsTrue(dbc.Move(key, true, lck));
+
+ // Move to an existing key/data pair.
+ Assert.IsTrue(dbc.Move(pair, true, lck));
+ }
+
+ /*
+ * Move the cursor to the first record in a nonempty
+ * database. The return value should be true.
+ */
+ public void MoveCursorToFirst(Cursor dbc, LockingInfo lck)
+ {
+ if (lck == null)
+ Assert.IsTrue(dbc.MoveFirst());
+ else
+ Assert.IsTrue(dbc.MoveFirst(lck));
+ }
+
+ /*
+ * Move the cursor to the last record in a nonempty
+ * database. The return value should be true.
+ */
+ public void MoveCursorToLast(Cursor dbc, LockingInfo lck)
+ {
+ if (lck == null)
+ Assert.IsTrue(dbc.MoveLast());
+ else
+ Assert.IsTrue(dbc.MoveLast(lck));
+ }
+
+ /*
+ * Move the cursor to the next record in a database
+ * with more than five records. Every move operation
+ * should return true.
+ */
+ public void MoveCursorToNext(Cursor dbc, LockingInfo lck)
+ {
+ for (int i = 0; i < 5; i++)
+ if (lck == null)
+ Assert.IsTrue(dbc.MoveNext());
+ else
+ Assert.IsTrue(dbc.MoveNext(lck));
+ }
+
+ /*
+ * Move the cursor to the next duplicate record in
+ * a database that has more than 2 duplicate
+ * records. The return value should be true.
+ */
+ public void MoveCursorToNextDuplicate(Cursor dbc,
+ LockingInfo lck)
+ {
+ DatabaseEntry key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+
+ /*
+ * The cursor should point to some record in the
+ * database before it moves to the next duplicate
+ * record.
+ */
+ if (lck == null)
+ {
+ dbc.Move(key, true);
+ Assert.IsTrue(dbc.MoveNextDuplicate());
+ }
+ else
+ {
+ /*
+ * Both the move and the move-next-duplicate
+ * operations should use LockingInfo. If either
+ * one omits LockingInfo, a deadlock can still
+ * occur.
+ */
+ dbc.Move(key, true, lck);
+ Assert.IsTrue(dbc.MoveNextDuplicate(lck));
+ }
+ }
+
+ /*
+ * Move the cursor to the next unique record in the
+ * database. The return value should be true.
+ */
+ public void MoveCursorToNextUnique(Cursor dbc,
+ LockingInfo lck)
+ {
+ for (int i = 0; i < 5; i++)
+ {
+ if (lck == null)
+ Assert.IsTrue(dbc.MoveNextUnique());
+ else
+ Assert.IsTrue(dbc.MoveNextUnique(lck));
+ }
+ }
+
+ /*
+ * Move the cursor to the previous record in the
+ * database. The return value should be true.
+ */
+ public void MoveCursorToPrev(Cursor dbc,
+ LockingInfo lck)
+ {
+ if (lck == null)
+ {
+ dbc.MoveLast();
+ for (int i = 0; i < 5; i++)
+ Assert.IsTrue(dbc.MovePrev());
+ }
+ else
+ {
+ dbc.MoveLast(lck);
+ for (int i = 0; i < 5; i++)
+ Assert.IsTrue(dbc.MovePrev(lck));
+ }
+
+ }
+
+ /*
+ * Move the cursor to a duplicate record and then to
+ * the next duplicate one. Finally move back to the
+ * previous duplicate. Since that previous duplicate
+ * exists, MovePrevDuplicate should return true.
+ */
+ public void MoveCursorToPrevDuplicate(Cursor dbc,
+ LockingInfo lck)
+ {
+ if (lck == null)
+ {
+ dbc.Move(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key")), true);
+ dbc.MoveNextDuplicate();
+ Assert.IsTrue(dbc.MovePrevDuplicate());
+ }
+ else
+ {
+ dbc.Move(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key")),true, lck);
+ dbc.MoveNextDuplicate(lck);
+ Assert.IsTrue(dbc.MovePrevDuplicate(lck));
+ }
+ }
+
+ /*
+ * Move the cursor to the previous unique record in a
+ * database with more than 2 records. The return
+ * value should be true.
+ */
+ public void MoveCursorToPrevUnique(Cursor dbc,
+ LockingInfo lck)
+ {
+ for (int i = 0; i < 5; i++)
+ if (lck == null)
+ dbc.MovePrevUnique();
+ else
+ dbc.MovePrevUnique(lck);
+ }
+
+ /*
+ * Refresh the cursor at the current record. The
+ * return value should be true.
+ */
+ public void MoveCursorToCurrentRec(Cursor dbc,
+ LockingInfo lck)
+ {
+ // Add a record to the database.
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ pair = new KeyValuePair<DatabaseEntry,DatabaseEntry>(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")));
+ dbc.Add(pair);
+
+ if (lck == null)
+ dbc.Refresh();
+ else
+ dbc.Refresh(lck);
+
+ Assert.IsNotNull(dbc.Current.Key);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/DatabaseConfigTest.cs b/db-4.8.30/test/scr037/DatabaseConfigTest.cs
new file mode 100644
index 0000000..8b1c01f
--- /dev/null
+++ b/db-4.8.30/test/scr037/DatabaseConfigTest.cs
@@ -0,0 +1,109 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections.Generic;
+using System.Data;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class DatabaseConfigTest
+ {
+ [Test]
+ virtual public void TestConfigWithoutEnv()
+ {
+ string testName = "TestConfigWithoutEnv";
+ string testFixtureName = "DatabaseConfigTest";
+
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ Config(xmlElem, ref dbConfig, true);
+ Confirm(xmlElem, dbConfig, true);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref DatabaseConfig dbConfig, bool compulsory)
+ {
+ uint pageSize = new uint();
+
+ Configuration.ConfigBool(xmlElement, "AutoCommit",
+ ref dbConfig.AutoCommit, compulsory);
+ Configuration.ConfigByteOrder(xmlElement, "ByteOrder",
+ ref dbConfig.ByteOrder, compulsory);
+ Configuration.ConfigCacheInfo(xmlElement, "CacheSize",
+ ref dbConfig.CacheSize, compulsory);
+ Configuration.ConfigBool(xmlElement, "DoChecksum",
+ ref dbConfig.DoChecksum, compulsory);
+ Configuration.ConfigString(xmlElement, "ErrorPrefix",
+ ref dbConfig.ErrorPrefix, compulsory);
+ Configuration.ConfigBool(xmlElement, "FreeThreaded",
+ ref dbConfig.FreeThreaded, compulsory);
+ Configuration.ConfigBool(xmlElement, "NoMMap",
+ ref dbConfig.NoMMap, compulsory);
+ Configuration.ConfigBool(xmlElement, "NonDurableTxns",
+ ref dbConfig.NonDurableTxns, compulsory);
+ if (Configuration.ConfigUint(xmlElement, "PageSize",
+ ref pageSize, compulsory))
+ dbConfig.PageSize = pageSize;
+ Configuration.ConfigCachePriority(xmlElement,
+ "Priority", ref dbConfig.Priority, compulsory);
+ Configuration.ConfigBool(xmlElement, "ReadOnly",
+ ref dbConfig.ReadOnly, compulsory);
+ Configuration.ConfigBool(xmlElement, "ReadUncommitted",
+ ref dbConfig.ReadUncommitted, compulsory);
+ Configuration.ConfigEncryption(xmlElement,
+ "Encryption", dbConfig, compulsory);
+ Configuration.ConfigBool(xmlElement, "Truncate",
+ ref dbConfig.Truncate, compulsory);
+ Configuration.ConfigBool(xmlElement, "UseMVCC",
+ ref dbConfig.UseMVCC, compulsory);
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ DatabaseConfig dbConfig, bool compulsory)
+ {
+ Configuration.ConfirmBool(xmlElement, "AutoCommit",
+ dbConfig.AutoCommit, compulsory);
+ Configuration.ConfirmByteOrder(xmlElement, "ByteOrder",
+ dbConfig.ByteOrder, compulsory);
+ Configuration.ConfirmCacheSize(xmlElement, "CacheSize",
+ dbConfig.CacheSize, compulsory);
+ Configuration.ConfirmBool(xmlElement, "DoChecksum",
+ dbConfig.DoChecksum, compulsory);
+ Configuration.ConfirmEncryption(xmlElement, "Encryption",
+ dbConfig.EncryptionPassword,
+ dbConfig.EncryptAlgorithm, compulsory);
+ Configuration.ConfirmString(xmlElement, "ErrorPrefix",
+ dbConfig.ErrorPrefix, compulsory);
+ Configuration.ConfirmBool(xmlElement, "FreeThreaded",
+ dbConfig.FreeThreaded, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoMMap",
+ dbConfig.NoMMap, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NonDurableTxns",
+ dbConfig.NonDurableTxns, compulsory);
+ Configuration.ConfirmUint(xmlElement, "PageSize",
+ dbConfig.PageSize, compulsory);
+ Configuration.ConfirmCachePriority(xmlElement,
+ "Priority", dbConfig.Priority, compulsory);
+ Configuration.ConfirmBool(xmlElement, "ReadOnly",
+ dbConfig.ReadOnly, compulsory);
+ Configuration.ConfirmBool(xmlElement, "ReadUncommitted",
+ dbConfig.ReadUncommitted, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Truncate",
+ dbConfig.Truncate, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseMVCC",
+ dbConfig.UseMVCC, compulsory);
+ }
+ }
+}
+
diff --git a/db-4.8.30/test/scr037/DatabaseEnvironmentConfigTest.cs b/db-4.8.30/test/scr037/DatabaseEnvironmentConfigTest.cs
new file mode 100644
index 0000000..f092ae4
--- /dev/null
+++ b/db-4.8.30/test/scr037/DatabaseEnvironmentConfigTest.cs
@@ -0,0 +1,299 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using System.Xml.XPath;
+using BerkeleyDB;
+using NUnit.Framework;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class DatabaseEnvironmentConfigTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "DatabaseEnvironmentConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestConfig()
+ {
+ testName = "TestConfig";
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ Config(xmlElem, ref envConfig, true);
+ Confirm(xmlElem, envConfig, true);
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ DatabaseEnvironmentConfig envConfig, bool compulsory)
+ {
+ Configuration.ConfirmBool(xmlElement, "AutoCommit",
+ envConfig.AutoCommit, compulsory);
+ Configuration.ConfirmBool(xmlElement, "CDB_ALLDB",
+ envConfig.CDB_ALLDB, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Create",
+ envConfig.Create, compulsory);
+ Configuration.ConfirmStringList(xmlElement, "DataDirs",
+ envConfig.DataDirs, compulsory);
+ Configuration.ConfirmString(xmlElement, "ErrorPrefix",
+ envConfig.ErrorPrefix, compulsory);
+ Configuration.ConfirmBool(xmlElement, "ForceFlush",
+ envConfig.ForceFlush, compulsory);
+ Configuration.ConfirmBool(xmlElement, "FreeThreaded",
+ envConfig.FreeThreaded, compulsory);
+ Configuration.ConfirmBool(xmlElement, "InitRegions",
+ envConfig.InitRegions, compulsory);
+ Configuration.ConfirmString(xmlElement,
+ "IntermediateDirMode",
+ envConfig.IntermediateDirMode, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Lockdown",
+ envConfig.Lockdown, compulsory);
+ Configuration.ConfirmUint(xmlElement, "LockTimeout",
+ envConfig.LockTimeout, compulsory);
+ Configuration.ConfirmUint(xmlElement, "MaxTransactions",
+ envConfig.MaxTransactions, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoBuffer",
+ envConfig.NoBuffer, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoLocking",
+ envConfig.NoLocking, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoMMap",
+ envConfig.NoMMap, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoLocking",
+ envConfig.NoLocking, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoPanic",
+ envConfig.NoPanic, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Overwrite",
+ envConfig.Overwrite, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Private",
+ envConfig.Private, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Register",
+ envConfig.Register, compulsory);
+ Configuration.ConfirmBool(xmlElement, "RunFatalRecovery",
+ envConfig.RunFatalRecovery, compulsory);
+ Configuration.ConfirmBool(xmlElement, "RunRecovery",
+ envConfig.RunRecovery, compulsory);
+ Configuration.ConfirmBool(xmlElement, "SystemMemory",
+ envConfig.SystemMemory, compulsory);
+ Configuration.ConfirmString(xmlElement, "TempDir",
+ envConfig.TempDir, compulsory);
+ Configuration.ConfirmBool(xmlElement, "TimeNotGranted",
+ envConfig.TimeNotGranted, compulsory);
+ Configuration.ConfirmBool(xmlElement, "TxnNoSync",
+ envConfig.TxnNoSync, compulsory);
+ Configuration.ConfirmBool(xmlElement, "TxnNoWait",
+ envConfig.TxnNoWait, compulsory);
+ Configuration.ConfirmBool(xmlElement, "TxnSnapshot",
+ envConfig.TxnSnapshot, compulsory);
+ Configuration.ConfirmDateTime(xmlElement,"TxnTimestamp",
+ envConfig.TxnTimestamp, compulsory);
+ Configuration.ConfirmBool(xmlElement, "TxnWriteNoSync",
+ envConfig.TxnWriteNoSync, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseCDB",
+ envConfig.UseCDB, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseLocking",
+ envConfig.UseLocking, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseLogging",
+ envConfig.UseLogging, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseMPool",
+ envConfig.UseMPool, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseMVCC",
+ envConfig.UseMVCC, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseReplication",
+ envConfig.UseReplication, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseTxns",
+ envConfig.UseTxns, compulsory);
+ envConfig.Verbosity = new VerboseMessages();
+ Configuration.ConfirmVerboseMessages(xmlElement,
+ "Verbosity", envConfig.Verbosity, compulsory);
+ Configuration.ConfirmBool(xmlElement, "YieldCPU",
+ envConfig.YieldCPU, compulsory);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref DatabaseEnvironmentConfig envConfig, bool compulsory)
+ {
+ uint value = new uint();
+ DateTime time = new DateTime();
+
+ Configuration.ConfigBool(xmlElement, "AutoCommit",
+ ref envConfig.AutoCommit, compulsory);
+ Configuration.ConfigBool(xmlElement, "CDB_ALLDB",
+ ref envConfig.CDB_ALLDB, compulsory);
+ Configuration.ConfigBool(xmlElement, "Create",
+ ref envConfig.Create, compulsory);
+ Configuration.ConfigString(xmlElement, "CreationDir",
+ ref envConfig.CreationDir, compulsory);
+ Configuration.ConfigStringList(xmlElement, "DataDirs",
+ ref envConfig.DataDirs, compulsory);
+ Configuration.ConfigString(xmlElement, "ErrorPrefix",
+ ref envConfig.ErrorPrefix, compulsory);
+ Configuration.ConfigBool(xmlElement, "ForceFlush",
+ ref envConfig.ForceFlush, compulsory);
+ Configuration.ConfigBool(xmlElement, "FreeThreaded",
+ ref envConfig.FreeThreaded, compulsory);
+ Configuration.ConfigBool(xmlElement, "InitRegions",
+ ref envConfig.InitRegions, compulsory);
+ Configuration.ConfigString(xmlElement, "IntermediateDirMode",
+ ref envConfig.IntermediateDirMode, compulsory);
+ Configuration.ConfigBool(xmlElement, "Lockdown",
+ ref envConfig.Lockdown, compulsory);
+ if (Configuration.ConfigUint(xmlElement, "LockTimeout",
+ ref value, compulsory))
+ envConfig.LockTimeout = value;
+ if (Configuration.ConfigUint(xmlElement, "MaxTransactions",
+ ref value, compulsory))
+ envConfig.MaxTransactions = value;
+ Configuration.ConfigBool(xmlElement, "NoBuffer",
+ ref envConfig.NoBuffer, compulsory);
+ Configuration.ConfigBool(xmlElement, "NoLocking",
+ ref envConfig.NoLocking, compulsory);
+ Configuration.ConfigBool(xmlElement, "NoMMap",
+ ref envConfig.NoMMap, compulsory);
+ Configuration.ConfigBool(xmlElement, "NoLocking",
+ ref envConfig.NoLocking, compulsory);
+ Configuration.ConfigBool(xmlElement, "NoPanic",
+ ref envConfig.NoPanic, compulsory);
+ Configuration.ConfigBool(xmlElement, "Overwrite",
+ ref envConfig.Overwrite, compulsory);
+ Configuration.ConfigBool(xmlElement, "Private",
+ ref envConfig.Private, compulsory);
+ Configuration.ConfigBool(xmlElement, "Register",
+ ref envConfig.Register, compulsory);
+ Configuration.ConfigBool(xmlElement, "RunFatalRecovery",
+ ref envConfig.RunFatalRecovery, compulsory);
+ Configuration.ConfigBool(xmlElement, "RunRecovery",
+ ref envConfig.RunRecovery, compulsory);
+ Configuration.ConfigBool(xmlElement, "SystemMemory",
+ ref envConfig.SystemMemory, compulsory);
+ Configuration.ConfigString(xmlElement, "TempDir",
+ ref envConfig.TempDir, compulsory);
+ Configuration.ConfigBool(xmlElement, "TimeNotGranted",
+ ref envConfig.TimeNotGranted, compulsory);
+ Configuration.ConfigBool(xmlElement, "TxnNoSync",
+ ref envConfig.TxnNoSync, compulsory);
+ Configuration.ConfigBool(xmlElement, "TxnNoWait",
+ ref envConfig.TxnNoWait, compulsory);
+ Configuration.ConfigBool(xmlElement, "TxnSnapshot",
+ ref envConfig.TxnSnapshot, compulsory);
+ if (Configuration.ConfigDateTime(xmlElement, "TxnTimestamp",
+ ref time, compulsory))
+ envConfig.TxnTimestamp = time;
+ Configuration.ConfigBool(xmlElement, "TxnWriteNoSync",
+ ref envConfig.TxnWriteNoSync, compulsory);
+ Configuration.ConfigBool(xmlElement, "UseLocking",
+ ref envConfig.UseLocking, compulsory);
+ Configuration.ConfigBool(xmlElement, "UseLogging",
+ ref envConfig.UseLogging, compulsory);
+ Configuration.ConfigBool(xmlElement, "UseMPool",
+ ref envConfig.UseMPool, compulsory);
+ Configuration.ConfigBool(xmlElement, "UseMVCC",
+ ref envConfig.UseMVCC, compulsory);
+ Configuration.ConfigBool(xmlElement, "UseReplication",
+ ref envConfig.UseReplication, compulsory);
+ Configuration.ConfigBool(xmlElement, "UseTxns",
+ ref envConfig.UseTxns, compulsory);
+ envConfig.Verbosity = new VerboseMessages();
+ Configuration.ConfigVerboseMessages(xmlElement,
+ "Verbosity", ref envConfig.Verbosity, compulsory);
+ Configuration.ConfigBool(xmlElement, "YieldCPU",
+ ref envConfig.YieldCPU, compulsory);
+ }
+
+ [Test]
+ public void TestConfigLock()
+ {
+ testName = "TestConfigLock";
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.LockSystemCfg = new LockingConfig();
+ LockingConfigTest.Config(xmlElem,
+ ref cfg.LockSystemCfg, true);
+ LockingConfigTest.Confirm(xmlElem,
+ cfg.LockSystemCfg, true);
+ }
+
+ [Test]
+ public void TestConfigLog()
+ {
+ testName = "TestConfigLog";
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.LogSystemCfg = new LogConfig();
+ LogConfigTest.Config(xmlElem, ref cfg.LogSystemCfg, true);
+ LogConfigTest.Confirm(xmlElem, cfg.LogSystemCfg, true);
+ }
+
+ [Test]
+ public void TestConfigMutex()
+ {
+ testName = "TestConfigMutex";
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.MutexSystemCfg = new MutexConfig();
+ MutexConfigTest.Config(xmlElem, ref cfg.MutexSystemCfg, true);
+ MutexConfigTest.Confirm(xmlElem, cfg.MutexSystemCfg, true);
+ }
+
+ [Test]
+ public void TestConfigReplication()
+ {
+ testName = "TestConfigReplication";
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.RepSystemCfg = new ReplicationConfig();
+ ReplicationConfigTest.Config(xmlElem,
+ ref cfg.RepSystemCfg, true);
+ ReplicationConfigTest.Confirm(xmlElem,
+ cfg.RepSystemCfg, true);
+ }
+
+ [Test]
+ public void TestSetEncryption()
+ {
+ testName = "TestSetEncryption";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.SetEncryption("key", EncryptionAlgorithm.AES);
+ Assert.AreEqual("key", envConfig.EncryptionPassword);
+ Assert.AreEqual(EncryptionAlgorithm.AES, envConfig.EncryptAlgorithm);
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+ Assert.AreEqual(EncryptionAlgorithm.AES, env.EncryptAlgorithm);
+ env.Close();
+ }
+
+ }
+}
+
diff --git a/db-4.8.30/test/scr037/DatabaseEnvironmentTest.cs b/db-4.8.30/test/scr037/DatabaseEnvironmentTest.cs
new file mode 100644
index 0000000..570bdc2
--- /dev/null
+++ b/db-4.8.30/test/scr037/DatabaseEnvironmentTest.cs
@@ -0,0 +1,1778 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class DatabaseEnvironmentTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ private DatabaseEnvironment testBeginTransactionEnv;
+ private BTreeDatabase testBeginTransactionDB;
+
+ private DatabaseEnvironment testCheckpointEnv;
+ private BTreeDatabase testCheckpointDB;
+
+ private DatabaseEnvironment testDetectDeadlocksEnv;
+ private BTreeDatabase testDetectDeadlocksDB;
+
+ private DatabaseEnvironment testFailCheckEnv;
+
+ private EventWaitHandle signal;
+
+ [TestFixtureSetUp]
+ public void SetUp()
+ {
+ testFixtureName = "DatabaseEnvironmentTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+ try
+ {
+ Configuration.ClearDir(testFixtureHome);
+ }
+ catch (Exception)
+ {
+ throw new TestException(
+ "Please clean the directory");
+ }
+ }
+
+ [Test]
+ public void TestArchivableDatabaseFiles()
+ {
+ testName = "TestArchivableDatabaseFiles";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName1 = testName + "1.db";
+ string dbFileName2 = testName + "2.db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.AutoCommit = true;
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ envConfig.UseLogging = true;
+ envConfig.UseTxns = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ // Open two databases.
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ BTreeDatabase db1 = BTreeDatabase.Open(dbFileName1, dbConfig);
+ db1.Close();
+ BTreeDatabase db2 = BTreeDatabase.Open(dbFileName2, dbConfig);
+ db2.Close();
+
+ /*
+ * Get the names of all database files in the
+ * environment. Two database file names should be
+ * returned, matching the names used when the
+ * databases were opened.
+ */
+ List<string> dbFiles = env.ArchivableDatabaseFiles(false);
+ Assert.AreEqual(2, dbFiles.Count);
+ Assert.IsTrue(dbFiles.Contains(dbFileName1));
+ Assert.IsTrue(dbFiles.Contains(dbFileName2));
+
+ /*
+ * Get the absolute paths of all database files in
+ * the environment. Confirm that those files exist.
+ */
+ List<string> dbFilesPath = env.ArchivableDatabaseFiles(true);
+ Assert.IsTrue(File.Exists(dbFilesPath[0]));
+ Assert.IsTrue(File.Exists(dbFilesPath[1]));
+
+ env.Close();
+ }
+
+ [Test]
+ public void TestArchivableLogFiles()
+ {
+ testName = "TestArchivableLogFiles";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.AutoCommit = true;
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ envConfig.UseLogging = true;
+ envConfig.UseTxns = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ // Open a database.
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ BTreeDatabase db = BTreeDatabase.Open(
+ dbFileName, dbConfig);
+
+ /*
+ * Put 1000 records into the database to generate
+ * more than one log file.
+ */
+ byte[] byteArr = new byte[1024];
+ for (int i = 0; i < 1000; i++)
+ db.Put(new DatabaseEntry(
+ BitConverter.GetBytes(i)),
+ new DatabaseEntry(byteArr));
+
+ db.Close();
+
+ List<string> logFiles = env.ArchivableLogFiles(false);
+
+ List<string> logFilesPath =
+ env.ArchivableLogFiles(true);
+ for (int i = 0; i < logFilesPath.Count; i++)
+ Assert.IsTrue(File.Exists(logFilesPath[i]));
+
+ env.Close();
+ }
+
+ [Test]
+ public void TestBeginCDSGroup()
+ {
+ testName = "TestBeginCDSGroup";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.Create = true;
+ cfg.UseCDB = true;
+ cfg.UseMPool = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(testHome, cfg);
+ Transaction txn = env.BeginCDSGroup();
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ BTreeDatabase db = BTreeDatabase.Open(
+ testName + ".db", dbConfig, txn);
+ db.Put(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data")), txn);
+ db.Close();
+ txn.Commit();
+ env.Close();
+ }
+
+ [Test]
+ public void TestBeginTransaction()
+ {
+ testName = "TestBeginTransaction";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Open an environment.
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.Create = true;
+ cfg.UseTxns = true;
+ cfg.UseMPool = true;
+ cfg.UseLogging = true;
+ cfg.UseLocking = true;
+ cfg.NoLocking = false;
+ cfg.FreeThreaded = true;
+ testBeginTransactionEnv = DatabaseEnvironment.Open(testHome, cfg);
+ testBeginTransactionEnv.DeadlockResolution = DeadlockPolicy.OLDEST;
+
+ // Open btree database.
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.AutoCommit = true;
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = testBeginTransactionEnv;
+ dbConfig.Duplicates = DuplicatesPolicy.NONE;
+ dbConfig.FreeThreaded = true;
+ testBeginTransactionDB = BTreeDatabase.Open(
+ testName + ".db", dbConfig);
+
+ testBeginTransactionDB.Put(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data")));
+
+ // Start two threads that exercise deadlock handling via timeouts.
+ Thread thread1 = new Thread(new ThreadStart(
+ DeadLockThreadWithLockTimeOut));
+ Thread thread2 = new Thread(new ThreadStart(
+ DeadLockThreadWithTxnTimeout));
+ signal = new EventWaitHandle(false, EventResetMode.ManualReset);
+ thread1.Start();
+ thread2.Start();
+ Thread.Sleep(1000);
+ signal.Set();
+ thread1.Join();
+ thread2.Join();
+
+ // Close all.
+ testBeginTransactionDB.Close();
+ testBeginTransactionEnv.Close();
+ }
+
+ public void DeadLockThreadWithLockTimeOut()
+ {
+ // Configure and begin a transaction.
+ TransactionConfig txnConfig = new TransactionConfig();
+ txnConfig.LockTimeout = 5000;
+ txnConfig.Name = "DeadLockThreadWithLockTimeOut";
+ Transaction txn =
+ testBeginTransactionEnv.BeginTransaction(txnConfig, null);
+ try
+ {
+ testBeginTransactionDB.Put(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data")));
+ signal.WaitOne();
+ testBeginTransactionDB.Put(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("newkey")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("newdata")),
+ txn);
+ txn.Commit();
+ }
+ catch (DeadlockException)
+ {
+ try
+ {
+ txn.Abort();
+ }
+ catch (DatabaseException)
+ {
+ throw new TestException();
+ }
+ }
+ catch (DatabaseException)
+ {
+ try
+ {
+ txn.Abort();
+ }
+ catch (DatabaseException)
+ {
+ throw new TestException();
+ }
+ }
+ }
+
+ public void DeadLockThreadWithTxnTimeout()
+ {
+ // Configure and begin a transaction.
+ TransactionConfig txnConfig = new TransactionConfig();
+ txnConfig.TxnTimeout = 5000;
+ txnConfig.Name = "DeadLockThreadWithTxnTimeout";
+ Transaction txn =
+ testBeginTransactionEnv.BeginTransaction(txnConfig, null);
+ try
+ {
+ testBeginTransactionDB.Put(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data")));
+ signal.WaitOne();
+ testBeginTransactionDB.Put(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("newkey")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("newdata")),
+ txn);
+ txn.Commit();
+ }
+ catch (DeadlockException)
+ {
+ try
+ {
+ txn.Abort();
+ }
+ catch (DatabaseException)
+ {
+ throw new TestException();
+ }
+ }
+ catch (DatabaseException)
+ {
+ try
+ {
+ txn.Abort();
+ }
+ catch (DatabaseException)
+ {
+ throw new TestException();
+ }
+ }
+ }
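+
+ /*
+ * A hedged sketch (not an NUnit test): instead of setting
+ * LockTimeout/TxnTimeout on each TransactionConfig as the two
+ * threads above do, a lock timeout can be configured once on the
+ * environment, so every transaction opened in it inherits the
+ * limit (the value is in the same units as
+ * TransactionConfig.LockTimeout). The home path and method name
+ * are hypothetical.
+ */
+ public void SketchEnvironmentWideLockTimeout()
+ {
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.Create = true;
+ cfg.UseLocking = true;
+ cfg.UseLogging = true;
+ cfg.UseMPool = true;
+ cfg.UseTxns = true;
+ cfg.LockTimeout = 5000;
+
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ "./TestOut/SketchLockTimeout", cfg);
+
+ // Transactions begun here give up lock waits after the
+ // configured timeout instead of blocking indefinitely.
+ Transaction txn = env.BeginTransaction();
+ txn.Commit();
+ env.Close();
+ }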
+
+ [Test]
+ public void TestCheckpoint()
+ {
+ testName = "TestCheckpoint";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Open an environment.
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.Create = true;
+ cfg.UseTxns = true;
+ cfg.UseMPool = true;
+ cfg.UseLogging = true;
+ cfg.UseLocking = true;
+ cfg.NoLocking = false;
+ cfg.FreeThreaded = true;
+ testCheckpointEnv = DatabaseEnvironment.Open(testHome, cfg);
+
+ // Open btree database.
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.AutoCommit = true;
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = testCheckpointEnv;
+ dbConfig.FreeThreaded = true;
+ testCheckpointDB = BTreeDatabase.Open(testName + ".db", dbConfig);
+
+
+ // Run a thread to put records into the database.
+ Thread thread1 = new Thread(new ThreadStart(PutRecordsThread));
+
+ /*
+ * Run a thread that checkpoints periodically and
+ * finally takes a checkpoint to flush the in-memory
+ * pool to disk.
+ */
+ Thread thread2 = new Thread(new ThreadStart(CheckpointThread));
+
+ thread1.Start();
+ thread2.Start();
+ thread1.Join();
+ thread2.Join();
+
+ // Close all.
+ testCheckpointDB.Close();
+ testCheckpointEnv.Close();
+ }
+
+ public void PutRecordsThread()
+ {
+ Transaction txn = testCheckpointEnv.BeginTransaction();
+ byte[] byteArr = new byte[1024];
+ for (int i = 0; i < 1000; i++)
+ testCheckpointDB.Put(
+ new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(byteArr), txn);
+ txn.Commit();
+ }
+
+ public void CheckpointThread()
+ {
+ uint bytes = 64;
+ uint minutes = 1;
+ uint count = 1;
+ while (count < 3)
+ {
+ testCheckpointEnv.Checkpoint(bytes, minutes);
+ count++;
+ }
+ Thread.Sleep(500);
+ testCheckpointEnv.Checkpoint();
+ }
+
+ [Test]
+ public void TestClose()
+ {
+ testName = "TestClose";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.Create = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, cfg);
+ env.Close();
+ }
+
+ [Test]
+ public void TestConfigAll()
+ {
+ testName = "TestConfigAll";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+
+ /*
+ * Open a new environment with all properties,
+ * fields and subsystems configured.
+ */
+ DatabaseEnvironmentConfig envConig =
+ new DatabaseEnvironmentConfig();
+ Config(xmlElem, ref envConig, true, true, true,
+ true, true, true);
+
+ // Configure with methods.
+ ReplicationHostAddress address =
+ new ReplicationHostAddress("127.0.0.0", 11111);
+ envConig.RepSystemCfg.Clockskew(102, 100);
+ envConig.RepSystemCfg.RetransmissionRequest(10, 100);
+ envConig.RepSystemCfg.TransmitLimit(1, 1024);
+
+ // Open the environment.
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConig);
+
+ // Confirm environment status with its configuration.
+ Confirm(xmlElem, env, true, true, true, true, true, true);
+
+ // Print statistics of the current environment.
+ env.PrintStats(true, true);
+
+ // Print statistics of all subsystems.
+ env.PrintSubsystemStats(true, true);
+
+ env.Close();
+ }
+
+ [Test]
+ public void TestDeadlockPolicy()
+ {
+ testName = "TestDeadlockPolicy";
+ testHome = testFixtureHome + "/" + testName;
+
+ DetectDeadlockPolicy(testHome + "_DEFAULT",
+ DeadlockPolicy.DEFAULT);
+
+ DetectDeadlockPolicy(testHome + "_EXPIRE",
+ DeadlockPolicy.EXPIRE);
+ DetectDeadlockPolicy(testHome + "_MAX_LOCKS",
+ DeadlockPolicy.MAX_LOCKS);
+ DetectDeadlockPolicy(testHome + "_MAX_WRITE",
+ DeadlockPolicy.MAX_WRITE);
+ DetectDeadlockPolicy(testHome + "_MIN_LOCKS",
+ DeadlockPolicy.MIN_LOCKS);
+ DetectDeadlockPolicy(testHome + "_MIN_WRITE",
+ DeadlockPolicy.MIN_WRITE);
+ DetectDeadlockPolicy(testHome + "_OLDEST",
+ DeadlockPolicy.OLDEST);
+ DetectDeadlockPolicy(testHome + "_RANDOM",
+ DeadlockPolicy.RANDOM);
+ DetectDeadlockPolicy(testHome + "_YOUNGEST",
+ DeadlockPolicy.YOUNGEST);
+ }
+
+ public void DetectDeadlockPolicy(
+ string home, DeadlockPolicy deadlock)
+ {
+ Configuration.ClearDir(home);
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseLocking = true;
+ envConfig.UseLogging = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, envConfig);
+ env.DeadlockResolution = deadlock;
+ Assert.AreEqual(deadlock, env.DeadlockResolution);
+ env.DetectDeadlocks(deadlock);
+ env.Close();
+ }
+
+ [Test]
+ public void TestDetectDeadlocks()
+ {
+ testName = "TestDetectDeadlocks";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Open an environment.
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.Create = true;
+ cfg.UseTxns = true;
+ cfg.UseMPool = true;
+ cfg.UseLogging = true;
+ cfg.UseLocking = true;
+ cfg.FreeThreaded = true;
+ testDetectDeadlocksEnv = DatabaseEnvironment.Open(
+ testHome, cfg);
+
+ // Open btree database.
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.AutoCommit = true;
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = testDetectDeadlocksEnv;
+ dbConfig.Duplicates = DuplicatesPolicy.NONE;
+ dbConfig.FreeThreaded = true;
+ testDetectDeadlocksDB = BTreeDatabase.Open(
+ testName + ".db", dbConfig);
+
+ // Put one record("key", "data") into database.
+ testDetectDeadlocksDB.Put(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data")));
+
+ // Start two threads that read and write records.
+ Thread thread1 = new Thread(new ThreadStart(ReadAndPutRecordThread));
+ Thread thread2 = new Thread(new ThreadStart(ReadAndPutRecordThread));
+ signal = new EventWaitHandle(false, EventResetMode.ManualReset);
+ thread1.Start();
+ thread2.Start();
+
+ // Give the threads enough time to read the record.
+ Thread.Sleep(1000);
+
+ /*
+ * Signal both threads to request their write locks
+ * at the same time.
+ */
+ signal.Set();
+
+ // Confirm that there is a deadlock in the environment.
+ Thread.Sleep(1000);
+ uint deadlockNum = testDetectDeadlocksEnv.DetectDeadlocks(
+ DeadlockPolicy.DEFAULT);
+ Assert.Less(0, deadlockNum);
+
+ thread1.Join();
+ thread2.Join();
+
+ // Close all.
+ testDetectDeadlocksDB.Close(false);
+ testDetectDeadlocksEnv.Close();
+ }
+
+ public void ReadAndPutRecordThread()
+ {
+ Transaction txn =
+ testDetectDeadlocksEnv.BeginTransaction();
+ try
+ {
+ testDetectDeadlocksDB.GetBoth(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data")), txn);
+ signal.WaitOne();
+ testDetectDeadlocksDB.Put(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("newKey")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("newData")),
+ txn);
+ txn.Commit();
+ }
+ catch (DeadlockException)
+ {
+ txn.Abort();
+ }
+ }
+
+ [Test]
+ public void TestFailCheck()
+ {
+ testName = "TestFailCheck";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.Create = true;
+ cfg.UseTxns = true;
+ cfg.UseMPool = true;
+ cfg.UseLogging = true;
+ cfg.UseLocking = true;
+ cfg.FreeThreaded = true;
+ cfg.ThreadIsAlive = new ThreadIsAliveDelegate(ThrdAlive);
+ cfg.SetThreadID = new SetThreadIDDelegate(SetThrdID);
+ cfg.ThreadCount = 10;
+ testFailCheckEnv = DatabaseEnvironment.Open(testHome, cfg);
+
+ Thread thread = new Thread(new ThreadStart(WriteThreadWithoutTxnCommit));
+ thread.Start();
+ thread.Join();
+ testFailCheckEnv.FailCheck();
+ testFailCheckEnv.Close();
+ }
+
+ public void WriteThreadWithoutTxnCommit()
+ {
+ Transaction txn = testFailCheckEnv.BeginTransaction();
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = testFailCheckEnv;
+ BTreeDatabase db = BTreeDatabase.Open("TestFailCheck.db", dbConfig, txn);
+ db.Close();
+ txn.Commit();
+ }
+
+ public bool ThrdAlive(DbThreadID info, bool procOnly)
+ {
+ Process pcs = Process.GetProcessById(info.processID);
+ if (pcs.HasExited == true)
+ return false;
+ else if (procOnly)
+ return true;
+ ProcessThreadCollection thrds = pcs.Threads;
+ foreach (ProcessThread pcsThrd in thrds)
+ {
+ if (pcsThrd.Id == info.threadID)
+ {
+ /*
+ * We have to use the fully qualified name, ThreadState
+ * defaults to System.Threading.ThreadState.
+ */
+ return (pcsThrd.ThreadState !=
+ System.Diagnostics.ThreadState.Terminated);
+ }
+ }
+ // If we can't find the thread, report it as not alive.
+ return false;
+ }
+
+ public DbThreadID SetThrdID()
+ {
+ DbThreadID threadID;
+
+ int pid = Process.GetCurrentProcess().Id;
+ uint tid = (uint)AppDomain.GetCurrentThreadId();
+ threadID = new DbThreadID(pid, tid);
+ return threadID;
+ }
+
+ [Test]
+ public void TestFeedback()
+ {
+ testName = "TestFeedback";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Open the environment.
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.AutoCommit = true;
+ cfg.UseLocking = true;
+ cfg.UseLogging = true;
+ cfg.UseMPool = true;
+ cfg.UseTxns = true;
+ cfg.Create = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(testHome, cfg);
+
+ env.Feedback = new EnvironmentFeedbackDelegate(
+ EnvRecovery10PercentFeedback);
+ env.Feedback(EnvironmentFeedbackEvent.RECOVERY, 10);
+
+ env.Close();
+ }
+
+ public void EnvRecovery10PercentFeedback(
+ EnvironmentFeedbackEvent opcode, int percent)
+ {
+ Assert.AreEqual(opcode, EnvironmentFeedbackEvent.RECOVERY);
+ Assert.AreEqual(10, percent);
+ }
+
+ [Test]
+ public void TestMutexSystemStats()
+ {
+ testName = "TestMutexSystemStats";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.Create = true;
+ cfg.UseLogging = true;
+ cfg.UseLocking = true;
+ cfg.UseMPool = true;
+ cfg.UseTxns = true;
+ cfg.MutexSystemCfg = new MutexConfig();
+ cfg.MutexSystemCfg.Alignment = 512;
+ cfg.MutexSystemCfg.Increment = 128;
+ cfg.MutexSystemCfg.MaxMutexes = 150;
+ cfg.MutexSystemCfg.NumTestAndSetSpins = 10;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(testHome, cfg);
+
+ MutexStats stats = env.MutexSystemStats();
+ env.PrintMutexSystemStats(true, true);
+ Assert.AreEqual(512, stats.Alignment);
+ Assert.AreEqual(stats.Count, stats.Available + stats.InUse);
+ Assert.LessOrEqual(stats.InUse, stats.MaxInUse);
+ Assert.AreNotEqual(0, stats.RegionSize);
+ Assert.AreEqual(0, stats.RegionWait);
+ Assert.AreEqual(10, stats.TASSpins);
+ ulong regionNoWait = stats.RegionNoWait;
+
+ BTreeDatabaseConfig dbCfg = new BTreeDatabaseConfig();
+ dbCfg.Creation = CreatePolicy.IF_NEEDED;
+ dbCfg.Env = env;
+ BTreeDatabase db = BTreeDatabase.Open(testName + ".db", dbCfg);
+ for (int i = 0; i < 1000; i++)
+ {
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)));
+ stats = env.MutexSystemStats();
+ }
+ Assert.LessOrEqual(regionNoWait, stats.RegionNoWait);
+ regionNoWait = stats.RegionNoWait;
+
+ stats = env.MutexSystemStats(true);
+ env.PrintMutexSystemStats();
+ stats = env.MutexSystemStats();
+ Assert.GreaterOrEqual(regionNoWait, stats.RegionNoWait);
+
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestLogFile()
+ {
+ testName = "TestLogFile";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Open environment and configure logging subsystem.
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.Create = true;
+ cfg.UseTxns = true;
+ cfg.AutoCommit = true;
+ cfg.UseLocking = true;
+ cfg.UseMPool = true;
+ cfg.UseLogging = true;
+ cfg.MPoolSystemCfg = new MPoolConfig();
+ cfg.MPoolSystemCfg.CacheSize =
+ new CacheInfo(0, 1048576, 1);
+ cfg.LogSystemCfg = new LogConfig();
+ cfg.LogSystemCfg.AutoRemove = false;
+ cfg.LogSystemCfg.BufferSize = 10240;
+ cfg.LogSystemCfg.Dir = "./";
+ cfg.LogSystemCfg.FileMode = 755;
+ cfg.LogSystemCfg.ForceSync = true;
+ cfg.LogSystemCfg.InMemory = false;
+ cfg.LogSystemCfg.MaxFileSize = 1048576;
+ cfg.LogSystemCfg.NoBuffer = false;
+ cfg.LogSystemCfg.RegionSize = 204800;
+ cfg.LogSystemCfg.ZeroOnCreate = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(testHome, cfg);
+
+ // Open database.
+ Transaction allTxn = env.BeginTransaction();
+ TransactionConfig txnConfig = new TransactionConfig();
+ txnConfig.Name = "OpenTransaction";
+ Transaction openTxn = env.BeginTransaction(txnConfig, allTxn);
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ BTreeDatabase db = BTreeDatabase.Open(
+ testName + ".db", dbConfig, openTxn);
+
+ List<ActiveTransaction> activeTxns =
+ env.TransactionSystemStats().Transactions;
+ for (int i = 0; i < activeTxns.Count; i++)
+ if (activeTxns[i].Name == "OpenTransaction")
+ {
+ LSN lsn = new LSN(
+ activeTxns[i].Begun.LogFileNumber,
+ activeTxns[i].Begun.Offset);
+ env.LogFlush(lsn);
+ string fileName = env.LogFile(lsn);
+ }
+
+ openTxn.Commit();
+
+ // Write "##" to log before putting data into database.
+ env.WriteToLog("##");
+
+ // Write 1000 records into the database.
+ TransactionConfig writeTxnConfig = new TransactionConfig();
+ writeTxnConfig.Name = "WriteTxn";
+ Transaction writeTxn = env.BeginTransaction(writeTxnConfig);
+ byte[] byteArr = new byte[1024];
+ for (int i = 0; i < 1000; i++)
+ {
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(byteArr), writeTxn);
+ env.LogFlush();
+ env.WriteToLog("#" + i.ToString(), writeTxn);
+ }
+
+ activeTxns = env.TransactionSystemStats().Transactions;
+ for (int i = 0; i < activeTxns.Count; i++)
+ if (activeTxns[i].Name == "WriteTxn")
+ {
+ LSN lsn = new LSN(
+ activeTxns[i].Begun.LogFileNumber,
+ activeTxns[i].Begun.Offset);
+ env.LogFlush(lsn);
+ string fileName = env.LogFile(lsn);
+ }
+
+
+ writeTxn.Commit();
+ db.Close();
+
+ // Write "##" after data has been put.
+ env.WriteToLog("##");
+
+ List<string> logFiles = env.LogFiles(true);
+
+ env.LogWrite(new DatabaseEntry(), true);
+
+ env.RemoveUnusedLogFiles();
+
+ allTxn.Commit();
+ env.Close();
+ }
+
+ [Test]
+ public void TestOpen()
+ {
+ testName = "TestOpen";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.Create = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(testHome, cfg);
+
+ // Confirm that the environment is initialized.
+ Assert.IsNotNull(env);
+
+ // Confirm the environment home directory.
+ Assert.AreEqual(testHome, env.Home);
+
+ // Print statistics of the current environment.
+ env.PrintStats();
+
+ // Print statistics of all subsystems.
+ env.PrintSubsystemStats();
+
+ env.Close();
+ }
+
+ [Test]
+ public void TestMPoolSystemStats()
+ {
+ testName = "TestMPoolSystemStats";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.AutoCommit = true;
+ envConfig.MPoolSystemCfg = new MPoolConfig();
+ envConfig.MPoolSystemCfg.CacheSize =
+ new CacheInfo(0, 1048576, 3);
+ envConfig.Create = true;
+ envConfig.UseLocking = true;
+ envConfig.UseLogging = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ envConfig.UseLogging = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ MPoolStats stats = env.MPoolSystemStats();
+ env.PrintMPoolSystemStats();
+
+ Assert.AreEqual(0, stats.BlockedOperations);
+ Assert.AreEqual(0, stats.BucketsCheckedDuringAlloc);
+ Assert.AreEqual(3, stats.CacheRegions);
+ Assert.LessOrEqual(1048576, stats.CacheSettings.Bytes);
+ Assert.AreEqual(0, stats.CacheSettings.Gigabytes);
+ Assert.AreEqual(3, stats.CacheSettings.NCaches);
+ Assert.AreEqual(0, stats.CleanPages);
+ Assert.AreEqual(0, stats.CleanPagesEvicted);
+ Assert.AreEqual(0, stats.DirtyPages);
+ Assert.AreEqual(0, stats.DirtyPagesEvicted);
+ Assert.IsNotNull(stats.Files);
+ Assert.AreEqual(0, stats.FrozenBuffers);
+ Assert.AreEqual(0, stats.FrozenBuffersFreed);
+ Assert.LessOrEqual(37, stats.HashBuckets);
+ Assert.LessOrEqual(0, stats.HashChainSearches);
+ Assert.AreEqual(0, stats.HashEntriesSearched);
+ Assert.AreEqual(0, stats.HashLockNoWait);
+ Assert.AreEqual(0, stats.HashLockWait);
+ Assert.AreEqual(0, stats.LongestHashChainSearch);
+ Assert.AreEqual(0, stats.MappedPages);
+ Assert.AreEqual(0, stats.MaxBucketsCheckedDuringAlloc);
+ Assert.AreEqual(0, stats.MaxBufferWrites);
+ Assert.AreEqual(0, stats.MaxBufferWritesSleep);
+ Assert.AreEqual(0, stats.MaxHashLockNoWait);
+ Assert.AreEqual(0, stats.MaxHashLockWait);
+ Assert.AreEqual(0, stats.MaxMMapSize);
+ Assert.AreEqual(0, stats.MaxOpenFileDescriptors);
+ Assert.AreEqual(0, stats.MaxPagesCheckedDuringAlloc);
+ Assert.AreEqual(0, stats.PageAllocations);
+ Assert.AreEqual(0, stats.Pages);
+ Assert.AreEqual(0, stats.PagesCheckedDuringAlloc);
+ Assert.LessOrEqual(0, stats.PagesCreatedInCache);
+ Assert.AreEqual(0, stats.PagesInCache);
+ Assert.AreEqual(0, stats.PagesNotInCache);
+ Assert.AreEqual(0, stats.PagesRead);
+ Assert.AreEqual(0, stats.PagesTrickled);
+ Assert.AreEqual(0, stats.PagesWritten);
+ Assert.AreNotEqual(0, stats.RegionLockNoWait);
+ Assert.AreEqual(0, stats.RegionLockWait);
+ Assert.LessOrEqual(0, stats.RegionSize);
+ Assert.AreEqual(0, stats.ThawedBuffers);
+
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ dbConfig.PageSize = 4096;
+ BTreeDatabase db = BTreeDatabase.Open(
+ testName + ".db", dbConfig);
+
+ byte[] largeByte = new byte[1088576];
+ for (int i = 0; i < 10; i++)
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(largeByte));
+ db.Put(new DatabaseEntry(largeByte), new DatabaseEntry(largeByte));
+
+ db.Close();
+
+ // Clean the stats after printing.
+ stats = env.MPoolSystemStats(true);
+ env.PrintMPoolSystemStats(true, true);
+ stats = env.MPoolSystemStats();
+ env.PrintMPoolSystemStats(true, true, true);
+
+ env.Close();
+ }
+
+ [Test]
+ public void TestRemove()
+ {
+ testName = "TestRemove";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Open new environment.
+ DatabaseEnvironmentConfig envConig =
+ new DatabaseEnvironmentConfig();
+ envConig.Create = true;
+ envConig.ErrorPrefix = testFixtureName + ":" + testName;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConig);
+ env.Close();
+
+ // Remove the existing environment.
+ DatabaseEnvironment.Remove(testHome);
+
+ // Confirm that the environment region file __db.001 is removed.
+ Assert.IsFalse(File.Exists(testHome + "/__db.001"));
+ }
+
+ [Test]
+ public void TestRemoveCorruptedEnv()
+ {
+ testName = "TestRemoveCorruptedEnv";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Open new environment.
+ DatabaseEnvironmentConfig envConig =
+ new DatabaseEnvironmentConfig();
+ envConig.Create = true;
+ envConig.ErrorPrefix = testFixtureName + ":" + testName;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(testHome, envConig);
+
+ // Panic the environment.
+ env.Panic();
+
+ // Remove the corrupted environment.
+ DatabaseEnvironment.Remove(testHome, true);
+
+ // Confirm that the environment region file __db.001 is removed.
+ Assert.IsFalse(File.Exists(testHome + "/__db.001"));
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestRenameDB()
+ {
+ testName = "TestRenameDB";
+ testHome = testFixtureHome + "/" + testName;
+
+ RenameDB(testHome, testName, false);
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestRenameDBWithTxn()
+ {
+ testName = "TestRenameDBWithTxn";
+ testHome = testFixtureHome + "/" + testName;
+
+ RenameDB(testHome, testName, true);
+ }
+
+ public void RenameDB(string home, string name, bool ifTxn)
+ {
+ string dbFileName = name + ".db";
+ string dbName = "db1";
+ string dbNewName = "db2";
+
+ Configuration.ClearDir(home);
+
+ DatabaseEnvironmentConfig envConig =
+ new DatabaseEnvironmentConfig();
+ envConig.Create = true;
+ envConig.UseTxns = true;
+ envConig.UseLogging = true;
+ envConig.UseMPool = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, envConig);
+
+ Transaction openTxn = env.BeginTransaction();
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ BTreeDatabase db = BTreeDatabase.Open(
+ dbFileName, dbName, dbConfig, openTxn);
+ db.Close();
+ openTxn.Commit();
+
+ // Rename the database.
+ if (ifTxn == false)
+ env.RenameDB(dbFileName, dbName, dbNewName, true);
+ else
+ {
+ Transaction renameTxn = env.BeginTransaction();
+ env.RenameDB(dbFileName, dbName, dbNewName, false, renameTxn);
+ renameTxn.Commit();
+ }
+
+ // Confirm that the database has been renamed.
+ Transaction reopenTxn = env.BeginTransaction();
+ try
+ {
+ Database db1 = Database.Open(
+ dbFileName, new DatabaseConfig());
+ db1.Close();
+ }
+ catch (DatabaseException)
+ {
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ reopenTxn.Commit();
+ env.Close();
+ }
+ }
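+
+ /*
+ * A hedged sketch of the complementary positive check for
+ * RenameDB() above: after the rename, the database should still
+ * be reachable in the same file under its new name. This helper
+ * is not wired into a test; its name and parameters are
+ * illustrative and mirror RenameDB().
+ */
+ public void SketchConfirmRename(DatabaseEnvironment env,
+ string dbFileName, string dbNewName)
+ {
+ BTreeDatabaseConfig cfg = new BTreeDatabaseConfig();
+ cfg.Creation = CreatePolicy.NEVER;
+ cfg.Env = env;
+
+ // Opening under the new name should succeed.
+ BTreeDatabase renamed = BTreeDatabase.Open(
+ dbFileName, dbNewName, cfg);
+ renamed.Close();
+ }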
+
+ [Test]
+ public void TestResetFileID()
+ {
+ testName = "TestResetFileID";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbNewFileName = testName + "_new.db";
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ // Open a new database.
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ BTreeDatabase db = BTreeDatabase.Open(
+ dbFileName, dbConfig);
+ db.Close();
+
+ // Copy the physical database file.
+ File.Copy(testHome + "/" + dbFileName,
+ testHome + "/" + dbNewFileName);
+
+ // Reset the file ID.
+ env.ResetFileID(dbNewFileName, false);
+
+ // Open the existing database in the copied database file.
+ BTreeDatabaseConfig cfg = new BTreeDatabaseConfig();
+ cfg.Creation = CreatePolicy.NEVER;
+ cfg.Env = env;
+ BTreeDatabase newDB = BTreeDatabase.Open(
+ dbNewFileName, cfg);
+ newDB.Close();
+ env.Close();
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestRemoveDB()
+ {
+ testName = "TestRemoveDB";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ RmDBWithoutTxn(testHome, testName, false);
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestRemoveDBWithAutoCommit()
+ {
+ testName = "TestRemoveDBWithAutoCommit";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ RmDBWithoutTxn(testHome, testName, true);
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestRemoveDBWithinTxn()
+ {
+ testName = "TestRemoveDBWithinTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbName1 = testName + "1";
+ string dbName2 = testName + "2";
+
+ Configuration.ClearDir(testHome);
+
+ // Open environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ envConfig.UseLogging = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+ Transaction txn = env.BeginTransaction();
+
+ // Create two databases in the environment.
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ BTreeDatabase btreeDB1 = BTreeDatabase.Open(
+ dbFileName, dbName1, dbConfig, txn);
+ btreeDB1.Close();
+ BTreeDatabase btreeDB2 = BTreeDatabase.Open(
+ dbFileName, dbName2, dbConfig, txn);
+ btreeDB2.Close();
+
+ // Remove one database from the environment.
+ env.RemoveDB(dbFileName, dbName2, false, txn);
+
+ // Try to open the existing database.
+ DatabaseConfig cfg = new DatabaseConfig();
+ cfg.Env = env;
+ Database db1 = Database.Open(dbFileName, dbName1, cfg, txn);
+ db1.Close();
+
+ /*
+ * Attempting to open the removed database should
+ * cause an error.
+ */
+ try
+ {
+ Database db2 = Database.Open(
+ dbFileName, dbName2, cfg, txn);
+ db2.Close();
+ }
+ catch (DatabaseException)
+ {
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ txn.Commit();
+ env.Close();
+ }
+ }
+
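+ /*
+ * Create two databases in a single file, remove the second one and
+ * verify that the removed database can no longer be opened while
+ * the remaining one still can. Optionally opens the environment
+ * with transactions and auto commit.
+ */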
+ public void RmDBWithoutTxn(string home, string dbName,
+ bool ifAutoCommit)
+ {
+ string dbFileName = dbName + ".db";
+ string dbName1 = dbName + "1";
+ string dbName2 = dbName + "2";
+
+ // Open environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ if (ifAutoCommit == true)
+ {
+ envConfig.AutoCommit = true;
+ envConfig.UseTxns = true;
+ }
+
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, envConfig);
+
+ // Create two databases in the environment.
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ BTreeDatabase btreeDB1 = BTreeDatabase.Open(
+ dbFileName, dbName1, dbConfig);
+ btreeDB1.Close();
+ BTreeDatabase btreeDB2 = BTreeDatabase.Open(
+ dbFileName, dbName2, dbConfig);
+ btreeDB2.Close();
+
+ // Remove one database from the environment.
+ env.RemoveDB(dbFileName, dbName2, false);
+
+ // Try to open the existing database.
+ DatabaseConfig cfg = new DatabaseConfig();
+ cfg.Env = env;
+ Database db1 = Database.Open(dbFileName, dbName1, cfg);
+ db1.Close();
+
+ /*
+ * Attempting to open the removed database should
+ * cause an error.
+ */
+ try
+ {
+ Database db2 = Database.Open(
+ dbFileName, dbName2, cfg);
+ db2.Close();
+ }
+ catch (DatabaseException)
+ {
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ env.Close();
+ }
+ }
+
+
+ [Test]
+ public void TestTransactionSystemStats()
+ {
+ testName = "TestTransactionSystemStats";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ TransactionStats stats;
+ BTreeDatabase db;
+ Transaction openTxn = null;
+
+ // Open an environment.
+ long dateTime;
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.MaxTransactions = 50;
+ envConfig.UseLogging = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ envConfig.TxnNoSync = false;
+ envConfig.TxnNoWait = true;
+ envConfig.TxnSnapshot = true;
+ envConfig.TxnTimestamp = DateTime.Now;
+ envConfig.TxnWriteNoSync = false;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ try
+ {
+ try
+ {
+ // Confirm initial transaction subsystem statistics.
+ stats = env.TransactionSystemStats();
+ env.PrintTransactionSystemStats(true, true);
+ Assert.AreEqual(0, stats.Aborted);
+ Assert.AreEqual(0, stats.Active);
+ Assert.AreEqual(0, stats.Begun);
+ Assert.AreEqual(0, stats.Committed);
+ Assert.AreEqual(0, stats.LastCheckpoint.LogFileNumber);
+ Assert.AreEqual(0, stats.LastCheckpoint.Offset);
+ Assert.AreEqual(50, stats.MaxTransactions);
+ Assert.AreNotEqual(0, stats.RegionSize);
+ Assert.AreEqual(0, stats.Transactions.Count);
+ }
+ catch (AssertionException e)
+ {
+ throw e;
+ }
+
+ try
+ {
+ // Begin a transaction named openTxn and open a database.
+ TransactionConfig openTxnCfg = new TransactionConfig();
+ openTxnCfg.Name = "openTxn";
+ openTxn = env.BeginTransaction(openTxnCfg);
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ db = BTreeDatabase.Open(testName + ".db", dbConfig, openTxn);
+ }
+ catch (DatabaseException e)
+ {
+ if (openTxn != null)
+ openTxn.Abort();
+ throw e;
+ }
+
+ try
+ {
+ // At least one transaction is now active.
+ env.Checkpoint();
+ stats = env.TransactionSystemStats();
+ env.PrintTransactionSystemStats();
+ Assert.AreNotEqual(0, stats.Active);
+ Assert.AreNotEqual(0, stats.Transactions.Count);
+ Assert.AreNotEqual(0, stats.Transactions.Capacity);
+ Assert.AreNotEqual(0, stats.RegionLockNoWait);
+ dateTime = stats.LastCheckpointTime;
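+ // Remember the checkpoint time; no further checkpoint is taken,
+ // so it should be unchanged at the end of the test.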
+
+ // Begin a nested transaction named putTxn, with openTxn as its parent.
+ TransactionConfig putTxnCfg =
+ new TransactionConfig();
+ putTxnCfg.Name = "putTxn";
+ putTxnCfg.NoWait = false;
+ Transaction putTxn = env.BeginTransaction(
+ putTxnCfg, openTxn);
+
+ try
+ {
+ // Put some records into the database within putTxn.
+ for (int i = 0; i < 50; i++)
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)), putTxn);
+ stats = env.TransactionSystemStats();
+ Assert.AreNotEqual(0, stats.MaxActive);
+ Assert.AreNotEqual(0, stats.MaxTransactions);
+ Assert.AreEqual(0, stats.MaxSnapshot);
+ Assert.AreEqual(0, stats.Snapshot);
+ Assert.AreEqual(stats.Begun,
+ stats.Aborted + stats.Active + stats.Committed);
+ Assert.AreEqual(2, stats.Transactions.Count);
+
+ /*
+ * Neither the LogFileNumber nor the Offset of an active
+ * transaction's begin LSN should be 0.
+ */
+ uint logFileNumbers = 0;
+ uint offSets = 0;
+ for (int i = 0; i < stats.Transactions.Count;i++)
+ {
+ logFileNumbers += stats.Transactions[i].Begun.LogFileNumber;
+ offSets += stats.Transactions[i].Begun.Offset;
+ }
+ Assert.AreNotEqual(0, logFileNumbers);
+ Assert.AreNotEqual(0, offSets);
+
+ // All active transactions are run by the same process and thread.
+
+ Assert.AreEqual(stats.Transactions[0].ThreadID,
+ stats.Transactions[1].ThreadID);
+ Assert.AreEqual(stats.Transactions[0].ProcessID,
+ stats.Transactions[1].ProcessID);
+
+ // All transactions are alive.
+ Assert.AreEqual(ActiveTransaction.TransactionStatus.RUNNING,
+ stats.Transactions[0].Status);
+ Assert.AreEqual(ActiveTransaction.TransactionStatus.RUNNING,
+ stats.Transactions[1].Status);
+
+ /*
+ * Find the openTxn in active transactions, which is the
+ * parent transaction of putTxn.
+ */
+ int parentPos = 0;
+ if (stats.Transactions[0].Name == "putTxn")
+ parentPos = 1;
+
+ // putTxn's parent ID should be openTxn's ID.
+ Assert.AreEqual(stats.Transactions[parentPos].ID,
+ stats.Transactions[1 - parentPos].ParentID);
+
+ // The remaining per-transaction stats should be non-negative.
+ for (int i = 0; i < stats.Transactions.Count - 1; i++)
+ {
+ Assert.LessOrEqual(0,
+ stats.Transactions[i].BufferCopiesInCache);
+ Assert.LessOrEqual(0,
+ stats.Transactions[i].SnapshotReads.LogFileNumber);
+ Assert.LessOrEqual(0,
+ stats.Transactions[i].SnapshotReads.Offset);
+ Assert.IsNotNull(stats.Transactions[i].GlobalID);
+ }
+
+ // Commit putTxn.
+ putTxn.Commit();
+ }
+ catch (DatabaseException e)
+ {
+ putTxn.Abort();
+ throw e;
+ }
+
+ stats = env.TransactionSystemStats();
+ Assert.AreNotEqual(0, stats.LastCheckpoint.LogFileNumber);
+ Assert.AreNotEqual(0, stats.LastCheckpoint.Offset);
+ Assert.AreEqual(dateTime, stats.LastCheckpointTime);
+
+ openTxn.Commit();
+ }
+ catch (DatabaseException e)
+ {
+ openTxn.Abort();
+ throw e;
+ }
+ finally
+ {
+ db.Close();
+ }
+ }
+ finally
+ {
+ env.Close();
+ }
+ }
+
+ /*
+ * Configure an environment. Only the settings that can be applied
+ * before the environment is opened are configured here.
+ */
+ public void Config(XmlElement xmlElem,
+ ref DatabaseEnvironmentConfig envConfig, bool compulsory,
+ bool logging, bool locking, bool mutex, bool mpool, bool replication)
+ {
+ XmlNode childNode;
+
+ // Configure environment without any subsystems.
+ DatabaseEnvironmentConfigTest.Config(xmlElem, ref envConfig, compulsory);
+
+ // Configure environment with logging subsystem.
+ if (logging == true)
+ {
+ childNode = XMLReader.GetNode(xmlElem, "LogConfig");
+ envConfig.LogSystemCfg = new LogConfig();
+ LogConfigTest.Config((XmlElement)childNode,
+ ref envConfig.LogSystemCfg, compulsory);
+ }
+
+ // Configure environment with locking subsystem.
+ if (locking == true)
+ {
+ childNode = XMLReader.GetNode(xmlElem, "LockingConfig");
+ envConfig.LockSystemCfg = new LockingConfig();
+ LockingConfigTest.Config((XmlElement)childNode,
+ ref envConfig.LockSystemCfg, compulsory);
+ }
+
+ // Configure environment with mutex subsystem.
+ if (mutex == true)
+ {
+ childNode = XMLReader.GetNode(xmlElem, "MutexConfig");
+ envConfig.MutexSystemCfg = new MutexConfig();
+ MutexConfigTest.Config((XmlElement)childNode,
+ ref envConfig.MutexSystemCfg, compulsory);
+ }
+
+ if (mpool == true)
+ {
+ childNode = XMLReader.GetNode(xmlElem, "MPoolConfig");
+ envConfig.MPoolSystemCfg = new MPoolConfig();
+ MPoolConfigTest.Config((XmlElement)childNode,
+ ref envConfig.MPoolSystemCfg, compulsory);
+ }
+
+ // Configure environment with replication.
+ if (replication == true)
+ {
+ childNode = XMLReader.GetNode(xmlElem, "ReplicationConfig");
+ envConfig.RepSystemCfg = new ReplicationConfig();
+ ReplicationConfigTest.Config((XmlElement)childNode,
+ ref envConfig.RepSystemCfg, compulsory);
+ }
+ }
+
+ /*
+ * Confirm the fields/properties of the environment.
+ * Those set through setter functions are not checked here.
+ */
+ public static void Confirm(XmlElement xmlElement,
+ DatabaseEnvironment env, bool compulsory,
+ bool logging, bool locking, bool mutex, bool mpool,
+ bool replication)
+ {
+ XmlElement childElem;
+ CacheInfo cacheInfo = new CacheInfo(0, 0, 0);
+
+ // Confirm environment configuration.
+ Configuration.ConfirmBool(xmlElement, "AutoCommit",
+ env.AutoCommit, compulsory);
+ Configuration.ConfirmBool(xmlElement, "CDB_ALLDB",
+ env.CDB_ALLDB, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Create",
+ env.Create, compulsory);
+ Configuration.ConfirmStringList(xmlElement, "DataDirs",
+ env.DataDirs, compulsory);
+ Configuration.ConfirmString(xmlElement, "ErrorPrefix",
+ env.ErrorPrefix, compulsory);
+ Configuration.ConfirmBool(xmlElement, "ForceFlush",
+ env.ForceFlush, compulsory);
+ Configuration.ConfirmBool(xmlElement, "FreeThreaded",
+ env.FreeThreaded, compulsory);
+ Configuration.ConfirmBool(xmlElement, "InitRegions",
+ env.InitRegions, compulsory);
+ Configuration.ConfirmString(xmlElement, "IntermediateDirMode",
+ env.IntermediateDirMode, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Lockdown",
+ env.Lockdown, compulsory);
+ Configuration.ConfirmUint(xmlElement, "LockTimeout",
+ env.LockTimeout, compulsory);
+ Configuration.ConfirmUint(xmlElement, "MaxTransactions",
+ env.MaxTransactions, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoBuffer",
+ env.NoBuffer, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoLocking",
+ env.NoLocking, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoMMap",
+ env.NoMMap, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoPanic",
+ env.NoPanic, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Overwrite",
+ env.Overwrite, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Private",
+ env.Private, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Register",
+ env.Register, compulsory);
+ Configuration.ConfirmBool(xmlElement, "RunFatalRecovery",
+ env.RunFatalRecovery, compulsory);
+ Configuration.ConfirmBool(xmlElement, "RunRecovery",
+ env.RunRecovery, compulsory);
+ Configuration.ConfirmBool(xmlElement, "SystemMemory",
+ env.SystemMemory, compulsory);
+ Configuration.ConfirmString(xmlElement, "TempDir",
+ env.TempDir, compulsory);
+ Configuration.ConfirmBool(xmlElement, "TimeNotGranted",
+ env.TimeNotGranted, compulsory);
+ Configuration.ConfirmBool(xmlElement, "TxnNoSync",
+ env.TxnNoSync, compulsory);
+ Configuration.ConfirmBool(xmlElement, "TxnNoWait",
+ env.TxnNoWait, compulsory);
+ Configuration.ConfirmBool(xmlElement, "TxnSnapshot",
+ env.TxnSnapshot, compulsory);
+ Configuration.ConfirmDateTime(xmlElement, "TxnTimestamp",
+ env.TxnTimestamp, compulsory);
+ Configuration.ConfirmBool(xmlElement, "TxnWriteNoSync",
+ env.TxnWriteNoSync, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseMVCC",
+ env.UseMVCC, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseCDB",
+ env.UsingCDB, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseLocking",
+ env.UsingLocking, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseLogging",
+ env.UsingLogging, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseMPool",
+ env.UsingMPool, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseReplication",
+ env.UsingReplication, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseTxns",
+ env.UsingTxns, compulsory);
+ env.Verbosity = new VerboseMessages();
+ Configuration.ConfirmVerboseMessages(xmlElement,
+ "Verbosity", env.Verbosity, compulsory);
+ Configuration.ConfirmBool(xmlElement, "YieldCPU",
+ env.YieldCPU, compulsory);
+
+ /*
+ * If the locking subsystem is enabled, check the
+ * fields/properties set in LockingConfig.
+ */
+ if (locking == true)
+ {
+ childElem = (XmlElement)XMLReader.GetNode(
+ xmlElement, "LockingConfig");
+ Configuration.ConfirmByteMatrix(childElem,
+ "Conflicts", env.LockConflictMatrix,
+ compulsory);
+ Configuration.ConfirmDeadlockPolicy(
+ childElem, "DeadlockResolution",
+ env.DeadlockResolution, compulsory);
+ Configuration.ConfirmUint(childElem,
+ "Partitions", env.LockPartitions,
+ compulsory);
+ Configuration.ConfirmUint(childElem,
+ "MaxLockers", env.MaxLockers, compulsory);
+ Configuration.ConfirmUint(childElem,
+ "MaxLocks", env.MaxLocks, compulsory);
+ Configuration.ConfirmUint(childElem,
+ "MaxObjects", env.MaxObjects, compulsory);
+ }
+
+ /*
+ * If the logging subsystem is enabled, check the
+ * fields/properties set in LogConfig.
+ */
+ if (logging == true)
+ {
+ childElem = (XmlElement)XMLReader.GetNode(
+ xmlElement, "LogConfig");
+ Configuration.ConfirmBool(childElem,
+ "AutoRemove", env.LogAutoRemove,
+ compulsory);
+ Configuration.ConfirmUint(childElem,
+ "BufferSize", env.LogBufferSize,
+ compulsory);
+ Configuration.ConfirmString(childElem,
+ "Dir", env.LogDir, compulsory);
+ Configuration.ConfirmInt(childElem,
+ "FileMode", env.LogFileMode, compulsory);
+ Configuration.ConfirmBool(childElem,
+ "ForceSync", env.LogForceSync, compulsory);
+ Configuration.ConfirmBool(childElem,
+ "InMemory", env.LogInMemory, compulsory);
+ Configuration.ConfirmBool(childElem,
+ "NoBuffer", env.LogNoBuffer, compulsory);
+ Configuration.ConfirmUint(childElem,
+ "RegionSize", env.LogRegionSize,
+ compulsory);
+ Configuration.ConfirmBool(childElem,
+ "ZeroOnCreate", env.LogZeroOnCreate,
+ compulsory);
+ Configuration.ConfirmUint(childElem,
+ "MaxFileSize", env.MaxLogFileSize,
+ compulsory);
+ }
+
+ /*
+ * If the mutex subsystem is enabled, check the
+ * fields/properties set in MutexConfig.
+ */
+ if (mutex == true)
+ {
+ childElem = (XmlElement)XMLReader.GetNode(
+ xmlElement, "MutexConfig");
+ Configuration.ConfirmUint(childElem,
+ "Alignment", env.MutexAlignment,
+ compulsory);
+ Configuration.ConfirmUint(childElem,
+ "MaxMutexes", env.MaxMutexes, compulsory);
+ try
+ {
+ Configuration.ConfirmUint(childElem,
+ "Increment", env.MutexIncrement,
+ compulsory);
+ }
+ catch (AssertionException)
+ {
+ Assert.AreEqual(0, env.MutexIncrement);
+ }
+
+ Configuration.ConfirmUint(childElem,
+ "NumTestAndSetSpins",
+ env.NumTestAndSetSpins, compulsory);
+ }
+
+ if (mpool == true)
+ {
+ childElem = (XmlElement)XMLReader.GetNode(
+ xmlElement, "MPoolConfig");
+ Configuration.ConfirmCacheSize(childElem,
+ "CacheSize", env.CacheSize, compulsory);
+ if (env.UsingMPool == false)
+ Configuration.ConfirmCacheSize(childElem,
+ "MaxCacheSize", env.MaxCacheSize, compulsory);
+ Configuration.ConfirmInt(childElem,
+ "MaxOpenFiles", env.MaxOpenFiles, compulsory);
+ Configuration.ConfirmMaxSequentialWrites(childElem,
+ "MaxSequentialWrites", env.SequentialWritePause,
+ env.MaxSequentialWrites, compulsory);
+ Configuration.ConfirmUint(childElem,
+ "MMapSize", env.MMapSize, compulsory);
+ }
+
+ if (replication == true)
+ {
+ childElem = (XmlElement)XMLReader.GetNode(
+ xmlElement, "ReplicationConfig");
+ Configuration.ConfirmUint(childElem,
+ "AckTimeout", env.RepAckTimeout, compulsory);
+ Configuration.ConfirmBool(childElem,
+ "BulkTransfer", env.RepBulkTransfer, compulsory);
+ Configuration.ConfirmUint(childElem,
+ "CheckpointDelay", env.RepCheckpointDelay, compulsory);
+ Configuration.ConfirmUint(childElem,
+ "ConnectionRetry", env.RepConnectionRetry, compulsory);
+ Configuration.ConfirmBool(childElem,
+ "DelayClientSync", env.RepDelayClientSync, compulsory);
+ Configuration.ConfirmUint(childElem,
+ "ElectionRetry", env.RepElectionRetry, compulsory);
+ Configuration.ConfirmUint(childElem,
+ "ElectionTimeout", env.RepElectionTimeout, compulsory);
+ Configuration.ConfirmUint(childElem,
+ "FullElectionTimeout", env.RepFullElectionTimeout,compulsory);
+ Configuration.ConfirmUint(childElem,
+ "HeartbeatMonitor", env.RepHeartbeatMonitor, compulsory);
+ Configuration.ConfirmUint(childElem,
+ "HeartbeatSend", env.RepHeartbeatSend, compulsory);
+ Configuration.ConfirmUint(childElem,
+ "LeaseTimeout", env.RepLeaseTimeout, compulsory);
+ Configuration.ConfirmBool(childElem,
+ "NoAutoInit", env.RepNoAutoInit, compulsory);
+ Configuration.ConfirmBool(childElem,
+ "NoBlocking", env.RepNoBlocking, compulsory);
+ Configuration.ConfirmUint(childElem,
+ "NSites", env.RepNSites, compulsory);
+ Configuration.ConfirmUint(childElem,
+ "Priority", env.RepPriority, compulsory);
+ Configuration.ConfirmAckPolicy(childElem,
+ "RepMgrAckPolicy", env.RepMgrAckPolicy, compulsory);
+ Configuration.ConfirmBool(childElem,
+ "Strict2Site", env.RepStrict2Site, compulsory);
+ Configuration.ConfirmBool(childElem,
+ "UseMasterLeases", env.RepUseMasterLeases, compulsory);
+ }
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/DatabaseExceptionTest.cs b/db-4.8.30/test/scr037/DatabaseExceptionTest.cs
new file mode 100644
index 0000000..4e662eb
--- /dev/null
+++ b/db-4.8.30/test/scr037/DatabaseExceptionTest.cs
@@ -0,0 +1,221 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class DatabaseExceptionTest
+ {
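+ /*
+ * Exercise DatabaseException.ThrowException with the various
+ * Berkeley DB error codes; the try/catch tests verify that each
+ * code is mapped to its specific exception subclass carrying
+ * that error code.
+ */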
+ [Test]
+ public void TestDB_REP_DUPMASTER()
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_REP_DUPMASTER);
+ }
+
+ [Test]
+ public void TestDB_REP_HOLDELECTION()
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_REP_HOLDELECTION);
+ }
+
+ [Test]
+ public void TestDB_REP_IGNORE()
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_REP_IGNORE);
+ }
+
+ [Test]
+ public void TestDB_REP_ISPERM()
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_REP_ISPERM);
+ }
+
+ [Test]
+ public void TestDB_REP_JOIN_FAILURE()
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_REP_JOIN_FAILURE);
+ }
+
+ [Test]
+ public void TestDB_REP_NEWSITE()
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_REP_NEWSITE);
+ }
+
+ [Test]
+ public void TestDB_REP_NOTPERM()
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_REP_NOTPERM);
+ }
+
+ [Test]
+ public void TestDeadlockException()
+ {
+ try
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_LOCK_DEADLOCK);
+ }
+ catch (DeadlockException e)
+ {
+ Assert.AreEqual(ErrorCodes.DB_LOCK_DEADLOCK, e.ErrorCode);
+ }
+ }
+
+ [Test]
+ public void TestForeignConflictException()
+ {
+ try
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_FOREIGN_CONFLICT);
+ }
+ catch (ForeignConflictException e)
+ {
+ Assert.AreEqual(ErrorCodes.DB_FOREIGN_CONFLICT, e.ErrorCode);
+ }
+ }
+
+ [Test]
+ public void TestKeyEmptyException()
+ {
+ try
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_KEYEMPTY);
+ }
+ catch (KeyEmptyException e)
+ {
+ Assert.AreEqual(ErrorCodes.DB_KEYEMPTY, e.ErrorCode);
+ }
+ }
+
+ [Test]
+ public void TestKeyExistException()
+ {
+ try
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_KEYEXIST);
+ }
+ catch (KeyExistException e)
+ {
+ Assert.AreEqual(ErrorCodes.DB_KEYEXIST, e.ErrorCode);
+ }
+ }
+
+ [Test]
+ public void TestLeaseExpiredException()
+ {
+ try
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_REP_LEASE_EXPIRED);
+ }
+ catch (LeaseExpiredException e)
+ {
+ Assert.AreEqual(ErrorCodes.DB_REP_LEASE_EXPIRED, e.ErrorCode);
+ }
+ }
+
+ [Test]
+ public void TestLockNotGrantedException()
+ {
+ try
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_LOCK_NOTGRANTED);
+ }
+ catch (LockNotGrantedException e)
+ {
+ Assert.AreEqual(ErrorCodes.DB_LOCK_NOTGRANTED, e.ErrorCode);
+ }
+ }
+
+ [Test]
+ public void TestNotFoundException()
+ {
+ try
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_NOTFOUND);
+ }
+ catch (NotFoundException e)
+ {
+ Assert.AreEqual(ErrorCodes.DB_NOTFOUND, e.ErrorCode);
+ }
+ }
+
+ [Test]
+ public void TestOldVersionException()
+ {
+ try
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_OLD_VERSION);
+ }
+ catch (OldVersionException e)
+ {
+ Assert.AreEqual(ErrorCodes.DB_OLD_VERSION, e.ErrorCode);
+ }
+ }
+
+ [Test]
+ public void TestPageNotFoundException()
+ {
+ try
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_PAGE_NOTFOUND);
+ }
+ catch (PageNotFoundException e)
+ {
+ Assert.AreEqual(ErrorCodes.DB_PAGE_NOTFOUND, e.ErrorCode);
+ }
+ }
+
+ [Test]
+ public void TestRunRecoveryException()
+ {
+ try
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_RUNRECOVERY);
+ }
+ catch (RunRecoveryException e)
+ {
+ Assert.AreEqual(ErrorCodes.DB_RUNRECOVERY, e.ErrorCode);
+ }
+
+ }
+
+ [Test]
+ public void TestVerificationException()
+ {
+ try
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_VERIFY_BAD);
+ }
+ catch (VerificationException e)
+ {
+ Assert.AreEqual(ErrorCodes.DB_VERIFY_BAD, e.ErrorCode);
+ }
+ }
+
+ [Test]
+ public void TestVersionMismatchException()
+ {
+ try
+ {
+ DatabaseException.ThrowException(ErrorCodes.DB_VERSION_MISMATCH);
+ }
+ catch (VersionMismatchException e)
+ {
+ Assert.AreEqual(ErrorCodes.DB_VERSION_MISMATCH, e.ErrorCode);
+ }
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/DatabaseTest.cs b/db-4.8.30/test/scr037/DatabaseTest.cs
new file mode 100644
index 0000000..07634a6
--- /dev/null
+++ b/db-4.8.30/test/scr037/DatabaseTest.cs
@@ -0,0 +1,109 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ public class DatabaseTest
+ {
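+ /*
+ * Open a throwaway database with default settings and report the
+ * cache size the library chose, so tests can compare against the
+ * real default.
+ */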
+ public static uint getDefaultCacheSizeBytes()
+ {
+ uint defaultCacheSizeBytes;
+
+ string fixtureHome = "./TestOut/DatabaseTest";
+ string dbName = fixtureHome + "/" + "getDefaultCacheSizeBytes" + ".db";
+
+ Configuration.ClearDir(fixtureHome);
+
+ BTreeDatabaseConfig cfg = new BTreeDatabaseConfig();
+ cfg.Creation = CreatePolicy.ALWAYS;
+ using (BTreeDatabase db = BTreeDatabase.Open(dbName, cfg))
+ {
+ defaultCacheSizeBytes = db.CacheSize.Bytes;
+ }
+
+ return defaultCacheSizeBytes;
+ }
+
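+ /*
+ * Open a database with ByteOrder.MACHINE and read back its
+ * endianness to discover the host machine's byte order.
+ */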
+ public static ByteOrder getMachineByteOrder()
+ {
+ string fixtureHome = "./TestOut/DatabaseTest";
+ string dbName = fixtureHome + "/" + "getMachineByteOrder" + ".db";
+
+ Configuration.ClearDir(fixtureHome);
+
+ ByteOrder byteOrder;
+
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.ByteOrder = ByteOrder.MACHINE;
+ dbConfig.Creation = CreatePolicy.ALWAYS;
+ using (BTreeDatabase db = BTreeDatabase.Open(dbName, dbConfig))
+ {
+ byteOrder = db.Endianness;
+ }
+ return byteOrder;
+ }
+
+ public static void Confirm(XmlElement xmlElement, Database db, bool compulsory)
+ {
+ uint defaultBytes;
+ defaultBytes = getDefaultCacheSizeBytes();
+
+ Configuration.ConfirmBool(xmlElement, "AutoCommit",
+ db.AutoCommit, compulsory);
+ Configuration.ConfirmCacheSize(xmlElement, "CacheSize",
+ db.CacheSize, defaultBytes, compulsory);
+ Configuration.ConfirmCreatePolicy(xmlElement, "Creation",
+ db.Creation, compulsory);
+ Configuration.ConfirmString(xmlElement, "DatabaseName",
+ db.DatabaseName, compulsory);
+ Configuration.ConfirmBool(xmlElement, "DoChecksum",
+ db.DoChecksum, compulsory);
+ // Encrypted and EncryptWithAES?
+ Configuration.ConfirmByteOrder(xmlElement, "ByteOrder",
+ db.Endianness, compulsory);
+ Configuration.ConfirmString(xmlElement, "ErrorPrefix",
+ db.ErrorPrefix, compulsory);
+ // The file name is confirmed in the calling function, not here.
+ Configuration.ConfirmBool(xmlElement, "FreeThreaded",
+ db.FreeThreaded, compulsory);
+ Configuration.ConfirmBool(xmlElement, "HasMultiple",
+ db.HasMultiple, compulsory);
+ if (db.Endianness == getMachineByteOrder())
+ Assert.IsTrue(db.InHostOrder);
+ else
+ Assert.IsFalse(db.InHostOrder);
+ Configuration.ConfirmBool(xmlElement, "NoMMap",
+ db.NoMMap, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NonDurableTxns",
+ db.NonDurableTxns, compulsory);
+ Configuration.ConfirmUint(xmlElement, "PageSize",
+ db.Pagesize, compulsory);
+ Configuration.ConfirmCachePriority(xmlElement,
+ "Priority", db.Priority, compulsory);
+ Configuration.ConfirmBool(xmlElement, "ReadOnly",
+ db.ReadOnly, compulsory);
+ Configuration.ConfirmBool(xmlElement, "ReadUncommitted",
+ db.ReadUncommitted, compulsory);
+ /*
+ * Database.Truncated is the value set in
+ * DatabaseConfig.Truncate.
+ */
+ Configuration.ConfirmBool(xmlElement, "Truncate",
+ db.Truncated, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseMVCC",
+ db.UseMVCC, compulsory);
+ }
+ }
+
+}
diff --git a/db-4.8.30/test/scr037/DotNetTest.csproj b/db-4.8.30/test/scr037/DotNetTest.csproj
new file mode 100644
index 0000000..0269b6f
--- /dev/null
+++ b/db-4.8.30/test/scr037/DotNetTest.csproj
@@ -0,0 +1,116 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <PropertyGroup>
+ <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+ <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
+ <ProductVersion>8.0.50727</ProductVersion>
+ <SchemaVersion>2.0</SchemaVersion>
+ <ProjectGuid>{08D940C7-AAF5-413A-95E3-58A46880DC4D}</ProjectGuid>
+ <OutputType>Library</OutputType>
+ <AppDesignerFolder>Properties</AppDesignerFolder>
+ <RootNamespace>DotNetTest</RootNamespace>
+ <AssemblyName>DotNetTest</AssemblyName>
+ <StartupObject>
+ </StartupObject>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
+ <DebugSymbols>true</DebugSymbols>
+ <DebugType>full</DebugType>
+ <Optimize>false</Optimize>
+ <OutputPath>bin\Debug\</OutputPath>
+ <DefineConstants>DEBUG;TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
+ <DebugType>pdbonly</DebugType>
+ <Optimize>true</Optimize>
+ <OutputPath>bin\Release\</OutputPath>
+ <DefineConstants>TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ </PropertyGroup>
+ <Import Project="$(MSBuildBinPath)\Microsoft.CSharp.targets" />
+ <!-- To modify your build process, add your task inside one of the targets below and uncomment it.
+ Other similar extension points exist, see Microsoft.Common.targets.
+ <Target Name="BeforeBuild">
+ </Target>
+ <Target Name="AfterBuild">
+ </Target>
+ -->
+ <ItemGroup>
+ <Compile Include="BTreeCursorTest.cs" />
+ <Compile Include="BTreeDatabaseConfigTest.cs" />
+ <Compile Include="BTreeDatabaseTest.cs" />
+ <Compile Include="Configuration.cs" />
+ <Compile Include="CursorConfigTest.cs" />
+ <Compile Include="CursorTest.cs" />
+ <Compile Include="DatabaseConfigTest.cs" />
+ <Compile Include="DatabaseEnvironmentConfigTest.cs" />
+ <Compile Include="DatabaseEnvironmentTest.cs" />
+ <Compile Include="DatabaseExceptionTest.cs" />
+ <Compile Include="DatabaseTest.cs" />
+ <Compile Include="ForeignKeyTest.cs" />
+ <Compile Include="HashCursorTest.cs" />
+ <Compile Include="HashDatabaseConfigTest.cs" />
+ <Compile Include="HashDatabaseTest.cs" />
+ <Compile Include="JoinCursorTest.cs" />
+ <Compile Include="LockingConfigTest.cs" />
+ <Compile Include="LockTest.cs">
+ <SubType>Code</SubType>
+ </Compile>
+ <Compile Include="LogConfigTest.cs" />
+ <Compile Include="MPoolConfigTest.cs" />
+ <Compile Include="MutexConfigTest.cs" />
+ <Compile Include="QueueDatabaseConfigTest.cs" />
+ <Compile Include="QueueDatabaseTest.cs" />
+ <Compile Include="RecnoCursorTest.cs" />
+ <Compile Include="RecnoDatabaseConfigTest.cs" />
+ <Compile Include="RecnoDatabaseTest.cs" />
+ <Compile Include="ReplicationConfigTest.cs" />
+ <Compile Include="ReplicationTest.cs" />
+ <Compile Include="SecondaryBTreeDatabaseConfigTest.cs" />
+ <Compile Include="SecondaryBTreeDatabaseTest.cs" />
+ <Compile Include="SecondaryCursorTest.cs" />
+ <Compile Include="SecondaryDatabaseConfigTest.cs" />
+ <Compile Include="SecondaryDatabaseTest.cs" />
+ <Compile Include="SecondaryHashDatabaseConfigTest.cs" />
+ <Compile Include="SecondaryHashDatabaseTest.cs" />
+ <Compile Include="SecondaryQueueDatabaseConfigTest.cs" />
+ <Compile Include="SecondaryQueueDatabaseTest.cs" />
+ <Compile Include="SecondaryRecnoDatabaseConfigTest.cs" />
+ <Compile Include="SecondaryRecnoDatabaseTest.cs" />
+ <Compile Include="SequenceConfigTest.cs" />
+ <Compile Include="SequenceTest.cs" />
+ <Compile Include="TestException.cs" />
+ <Compile Include="TransactionConfigTest.cs" />
+ <Compile Include="TransactionTest.cs" />
+ <Compile Include="XMLReader.cs" />
+ </ItemGroup>
+ <ItemGroup>
+ <ProjectReference Include="..\..\csharp\db_dotnet.csproj">
+ <Project>{4696FB1E-1E5F-40B9-BD8C-A54D3BDA00F6}</Project>
+ <Name>db_dotnet</Name>
+ </ProjectReference>
+ </ItemGroup>
+ <ItemGroup>
+ <Reference Include="nunit.framework, Version=2.4.8.0, Culture=neutral, PublicKeyToken=96d09a1eb7f44a77, processorArchitecture=MSIL" />
+ <Reference Include="System" />
+ <Reference Include="System.Data" />
+ <Reference Include="System.XML" />
+ </ItemGroup>
+ <PropertyGroup>
+ <PreBuildEvent>IF $(ConfigurationName) == Debug SET LIBEXT=d
+IF $(ConfigurationName) == Release SET LIBEXT
+
+copy /B "$(SolutionDir)Win32\$(ConfigurationName)\libdb48%25LIBEXT%25.dll" "$(TargetDir)"
+copy /B "$(SolutionDir)Win32\$(ConfigurationName)\libdb_csharp48%25LIBEXT%25.dll" "$(TargetDir)"
+copy "$(ProjectDir)AllTestData.xml" "$(TargetDir)"</PreBuildEvent>
+ </PropertyGroup>
+ <ItemGroup>
+ <Content Include="AllTestData.xml" />
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="bdb4.7.db" />
+ </ItemGroup>
+</Project>
diff --git a/db-4.8.30/test/scr037/ForeignKeyTest.cs b/db-4.8.30/test/scr037/ForeignKeyTest.cs
new file mode 100644
index 0000000..e140d7b
--- /dev/null
+++ b/db-4.8.30/test/scr037/ForeignKeyTest.cs
@@ -0,0 +1,280 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest {
+ [TestFixture]
+ public class ForeignKeyTest {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests() {
+ testFixtureName = "ForeignKeyTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ }
+
+ [Test]
+ public void TestAbortBTree() {
+ testName = "TestAbortBTree";
+ testHome = testFixtureHome + "/" + testName;
+ TestForeignKeyDelete(DatabaseType.BTREE, ForeignKeyDeleteAction.ABORT);
+ }
+ [Test]
+ public void TestAbortHash() {
+ testName = "TestAbortHash";
+ testHome = testFixtureHome + "/" + testName;
+ TestForeignKeyDelete(DatabaseType.HASH, ForeignKeyDeleteAction.ABORT);
+ }
+ [Test]
+ public void TestAbortQueue() {
+ testName = "TestAbortQueue";
+ testHome = testFixtureHome + "/" + testName;
+ TestForeignKeyDelete(DatabaseType.QUEUE, ForeignKeyDeleteAction.ABORT);
+ }
+ [Test]
+ public void TestAbortRecno() {
+ testName = "TestAbortRecno";
+ testHome = testFixtureHome + "/" + testName;
+ TestForeignKeyDelete(DatabaseType.RECNO, ForeignKeyDeleteAction.ABORT);
+ }
+
+ [Test]
+ public void TestCascadeBTree() {
+ testName = "TestCascadeBTree";
+ testHome = testFixtureHome + "/" + testName;
+ TestForeignKeyDelete(DatabaseType.BTREE, ForeignKeyDeleteAction.CASCADE);
+ }
+
+ [Test]
+ public void TestCascadeHash() {
+ testName = "TestCascadeHash";
+ testHome = testFixtureHome + "/" + testName;
+ TestForeignKeyDelete(DatabaseType.HASH, ForeignKeyDeleteAction.CASCADE);
+ }
+
+ [Test]
+ public void TestCascadeQueue() {
+ testName = "TestCascadeQueue";
+ testHome = testFixtureHome + "/" + testName;
+ TestForeignKeyDelete(DatabaseType.QUEUE, ForeignKeyDeleteAction.CASCADE);
+ }
+
+ [Test]
+ public void TestCascadeRecno() {
+ testName = "TestCascadeRecno";
+ testHome = testFixtureHome + "/" + testName;
+ TestForeignKeyDelete(DatabaseType.RECNO, ForeignKeyDeleteAction.CASCADE);
+ }
+
+ [Test]
+ public void TestNullifyBTree() {
+ testName = "TestNullifyBTree";
+ testHome = testFixtureHome + "/" + testName;
+ TestForeignKeyDelete(DatabaseType.BTREE, ForeignKeyDeleteAction.NULLIFY);
+ }
+
+ [Test]
+ public void TestNullifyHash() {
+ testName = "TestNullifyHash";
+ testHome = testFixtureHome + "/" + testName;
+ TestForeignKeyDelete(DatabaseType.HASH, ForeignKeyDeleteAction.NULLIFY);
+ }
+
+ [Test]
+ public void TestNullifyQueue() {
+ testName = "TestNullifyQueue";
+ testHome = testFixtureHome + "/" + testName;
+ TestForeignKeyDelete(DatabaseType.QUEUE, ForeignKeyDeleteAction.NULLIFY);
+ }
+
+ [Test]
+ public void TestNullifyRecno() {
+ testName = "TestNullifyRecno";
+ testHome = testFixtureHome + "/" + testName;
+ TestForeignKeyDelete(DatabaseType.RECNO, ForeignKeyDeleteAction.NULLIFY);
+ }
+
+ public void TestForeignKeyDelete(DatabaseType dbtype, ForeignKeyDeleteAction action) {
+ string dbFileName = testHome + "/" + testName + ".db";
+ string fdbFileName = testHome + "/" + testName + "foreign.db";
+ string sdbFileName = testHome + "/" + testName + "sec.db";
+ Configuration.ClearDir(testHome);
+
+ Database primaryDB, fdb;
+ SecondaryDatabase secDB;
+
+ // Open primary database.
+ if (dbtype == DatabaseType.BTREE) {
+ BTreeDatabaseConfig btConfig = new BTreeDatabaseConfig();
+ btConfig.Creation = CreatePolicy.ALWAYS;
+ primaryDB = BTreeDatabase.Open(dbFileName, btConfig);
+ fdb = BTreeDatabase.Open(fdbFileName, btConfig);
+ } else if (dbtype == DatabaseType.HASH) {
+ HashDatabaseConfig hConfig = new HashDatabaseConfig();
+ hConfig.Creation = CreatePolicy.ALWAYS;
+ primaryDB = HashDatabase.Open(dbFileName, hConfig);
+ fdb = HashDatabase.Open(fdbFileName, hConfig);
+ } else if (dbtype == DatabaseType.QUEUE) {
+ QueueDatabaseConfig qConfig = new QueueDatabaseConfig();
+ qConfig.Creation = CreatePolicy.ALWAYS;
+ qConfig.Length = 4;
+ primaryDB = QueueDatabase.Open(dbFileName, qConfig);
+ fdb = QueueDatabase.Open(fdbFileName, qConfig);
+ } else if (dbtype == DatabaseType.RECNO) {
+ RecnoDatabaseConfig rConfig = new RecnoDatabaseConfig();
+ rConfig.Creation = CreatePolicy.ALWAYS;
+ primaryDB = RecnoDatabase.Open(dbFileName, rConfig);
+ fdb = RecnoDatabase.Open(fdbFileName, rConfig);
+ } else {
+ throw new ArgumentException("Invalid DatabaseType");
+ }
+
+ // Open secondary database.
+ if (dbtype == DatabaseType.BTREE) {
+ SecondaryBTreeDatabaseConfig secbtConfig =
+ new SecondaryBTreeDatabaseConfig(primaryDB,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ secbtConfig.Creation = CreatePolicy.ALWAYS;
+ secbtConfig.Duplicates = DuplicatesPolicy.SORTED;
+ if (action == ForeignKeyDeleteAction.NULLIFY)
+ secbtConfig.SetForeignKeyConstraint(fdb, action, new ForeignKeyNullifyDelegate(Nullify));
+ else
+ secbtConfig.SetForeignKeyConstraint(fdb, action);
+ secDB = SecondaryBTreeDatabase.Open(sdbFileName, secbtConfig);
+ } else if (dbtype == DatabaseType.HASH) {
+ SecondaryHashDatabaseConfig sechConfig =
+ new SecondaryHashDatabaseConfig(primaryDB,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ sechConfig.Creation = CreatePolicy.ALWAYS;
+ sechConfig.Duplicates = DuplicatesPolicy.SORTED;
+ if (action == ForeignKeyDeleteAction.NULLIFY)
+ sechConfig.SetForeignKeyConstraint(fdb, action, new ForeignKeyNullifyDelegate(Nullify));
+ else
+ sechConfig.SetForeignKeyConstraint(fdb, action);
+ secDB = SecondaryHashDatabase.Open(sdbFileName, sechConfig);
+ } else if (dbtype == DatabaseType.QUEUE) {
+ SecondaryQueueDatabaseConfig secqConfig =
+ new SecondaryQueueDatabaseConfig(primaryDB,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ secqConfig.Creation = CreatePolicy.ALWAYS;
+ secqConfig.Length = 4;
+ if (action == ForeignKeyDeleteAction.NULLIFY)
+ secqConfig.SetForeignKeyConstraint(fdb, action, new ForeignKeyNullifyDelegate(Nullify));
+ else
+ secqConfig.SetForeignKeyConstraint(fdb, action);
+ secDB = SecondaryQueueDatabase.Open(sdbFileName, secqConfig);
+ } else if (dbtype == DatabaseType.RECNO) {
+ SecondaryRecnoDatabaseConfig secrConfig =
+ new SecondaryRecnoDatabaseConfig(primaryDB,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ secrConfig.Creation = CreatePolicy.ALWAYS;
+ if (action == ForeignKeyDeleteAction.NULLIFY)
+ secrConfig.SetForeignKeyConstraint(fdb, action, new ForeignKeyNullifyDelegate(Nullify));
+ else
+ secrConfig.SetForeignKeyConstraint(fdb, action);
+ secDB = SecondaryRecnoDatabase.Open(sdbFileName, secrConfig);
+ } else {
+ throw new ArgumentException("Invalid DatabaseType");
+ }
+
+ /* Use integer keys for Queue/Recno support. */
+ fdb.Put(new DatabaseEntry(BitConverter.GetBytes(100)),
+ new DatabaseEntry(BitConverter.GetBytes(1001)));
+ fdb.Put(new DatabaseEntry(BitConverter.GetBytes(200)),
+ new DatabaseEntry(BitConverter.GetBytes(2002)));
+ fdb.Put(new DatabaseEntry(BitConverter.GetBytes(300)),
+ new DatabaseEntry(BitConverter.GetBytes(3003)));
+
+ primaryDB.Put(new DatabaseEntry(BitConverter.GetBytes(1)),
+ new DatabaseEntry(BitConverter.GetBytes(100)));
+ primaryDB.Put(new DatabaseEntry(BitConverter.GetBytes(2)),
+ new DatabaseEntry(BitConverter.GetBytes(200)));
+ if (dbtype == DatabaseType.BTREE || dbtype == DatabaseType.HASH)
+ primaryDB.Put(new DatabaseEntry(BitConverter.GetBytes(3)),
+ new DatabaseEntry(BitConverter.GetBytes(100)));
+
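+ /*
+ * Deleting foreign key 100 triggers the configured delete action:
+ * ABORT rejects the delete, CASCADE also removes the matching
+ * primary and secondary records, and NULLIFY clears the foreign
+ * key in the matching primary records instead.
+ */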
+ try {
+ fdb.Delete(new DatabaseEntry(BitConverter.GetBytes(100)));
+ } catch (ForeignConflictException) {
+ Assert.AreEqual(action, ForeignKeyDeleteAction.ABORT);
+ }
+ if (action == ForeignKeyDeleteAction.ABORT) {
+ Assert.IsTrue(secDB.Exists(new DatabaseEntry(BitConverter.GetBytes(100))));
+ Assert.IsTrue(primaryDB.Exists(new DatabaseEntry(BitConverter.GetBytes(1))));
+ Assert.IsTrue(fdb.Exists(new DatabaseEntry(BitConverter.GetBytes(100))));
+ } else if (action == ForeignKeyDeleteAction.CASCADE) {
+ try {
+ Assert.IsFalse(secDB.Exists(new DatabaseEntry(BitConverter.GetBytes(100))));
+ } catch (KeyEmptyException) {
+ Assert.IsTrue(dbtype == DatabaseType.QUEUE || dbtype == DatabaseType.RECNO);
+ }
+ try {
+ Assert.IsFalse(primaryDB.Exists(new DatabaseEntry(BitConverter.GetBytes(1))));
+ } catch (KeyEmptyException) {
+ Assert.IsTrue(dbtype == DatabaseType.QUEUE || dbtype == DatabaseType.RECNO);
+ }
+ try {
+ Assert.IsFalse(fdb.Exists(new DatabaseEntry(BitConverter.GetBytes(100))));
+ } catch (KeyEmptyException) {
+ Assert.IsTrue(dbtype == DatabaseType.QUEUE || dbtype == DatabaseType.RECNO);
+ }
+ } else if (action == ForeignKeyDeleteAction.NULLIFY) {
+ try {
+ Assert.IsFalse(secDB.Exists(new DatabaseEntry(BitConverter.GetBytes(100))));
+ } catch (KeyEmptyException) {
+ Assert.IsTrue(dbtype == DatabaseType.QUEUE || dbtype == DatabaseType.RECNO);
+ }
+ Assert.IsTrue(primaryDB.Exists(new DatabaseEntry(BitConverter.GetBytes(1))));
+ try {
+ Assert.IsFalse(fdb.Exists(new DatabaseEntry(BitConverter.GetBytes(100))));
+ } catch (KeyEmptyException) {
+ Assert.IsTrue(dbtype == DatabaseType.QUEUE || dbtype == DatabaseType.RECNO);
+ }
+ }
+
+ // Close secondary database.
+ secDB.Close();
+
+ // Close primary database.
+ primaryDB.Close();
+
+ // Close foreign database
+ fdb.Close();
+ }
+
+ public DatabaseEntry SecondaryKeyGen(
+ DatabaseEntry key, DatabaseEntry data) {
+ DatabaseEntry dbtGen;
+
+ int skey = BitConverter.ToInt32(data.Data, 0);
+ // don't index secondary key of 0
+ if (skey == 0)
+ return null;
+
+ dbtGen = new DatabaseEntry(data.Data);
+ return dbtGen;
+ }
+
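+ /*
+ * Nullify callback: overwrite the foreign key in the data with 0,
+ * which SecondaryKeyGen treats as "do not index", so the secondary
+ * entry is dropped while the primary record is kept.
+ */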
+ public DatabaseEntry Nullify(DatabaseEntry key, DatabaseEntry data, DatabaseEntry fkey) {
+ DatabaseEntry ret = new DatabaseEntry(BitConverter.GetBytes(0));
+ return ret;
+ }
+
+ }
+}
\ No newline at end of file
diff --git a/db-4.8.30/test/scr037/HashCursorTest.cs b/db-4.8.30/test/scr037/HashCursorTest.cs
new file mode 100644
index 0000000..1dd4cfb
--- /dev/null
+++ b/db-4.8.30/test/scr037/HashCursorTest.cs
@@ -0,0 +1,237 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class HashCursorTest
+ {
+ private string testFixtureName;
+ private string testFixtureHome;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "HashCursorTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+ }
+
+ [Test]
+ public void TestAddToLoc()
+ {
+ HashDatabase db;
+ HashCursor cursor;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+
+ testName = "TestAddToLoc";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Open a hash database and cursor, then add the
+ * record ("key", "data") to the database.
+ */
+ GetHashDBAndCursor(testHome, testName, out db, out cursor);
+ AddOneByCursor(cursor);
+
+ /*
+ * Add a new record ("key", "data1") as the first
+ * data item under the key "key".
+ */
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data1")));
+ cursor.Add(pair, Cursor.InsertLocation.FIRST);
+
+ /*
+ * Confirm that the new record was added as the
+ * first data item under the key "key".
+ */
+ cursor.Move(new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")), true);
+ Assert.AreEqual(ASCIIEncoding.ASCII.GetBytes("data1"),
+ cursor.Current.Value.Data);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestAddUnique()
+ {
+ HashDatabase db;
+ HashCursor cursor;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+
+ testName = "TestAddUnique";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Open a database and cursor.
+ HashDatabaseConfig dbConfig = new HashDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+
+ /*
+ * AddUnique requires the database to be configured
+ * with sorted duplicates.
+ */
+ dbConfig.Duplicates = DuplicatesPolicy.SORTED;
+ db = HashDatabase.Open(
+ testHome + "/" + testName + ".db", dbConfig);
+ cursor = db.Cursor();
+
+ // Add record("key", "data") into database.
+ AddOneByCursor(cursor);
+
+ /*
+ * Adding the duplicate record ("key", "data") should fail.
+ */
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data")));
+ try
+ {
+ cursor.AddUnique(pair);
+ }
+ catch (KeyExistException)
+ {
+ }
+ finally
+ {
+ cursor.Close();
+ db.Close();
+ }
+ }
+
+ [Test]
+ public void TestDuplicate()
+ {
+ HashDatabase db;
+ HashCursor cursor, dupCursor;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+
+ testName = "TestDuplicate";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ GetHashDBAndCursor(testHome, testName,
+ out db, out cursor);
+
+ /*
+ * Add a record ("key", "data") with the cursor and
+ * refresh the cursor to the current record.
+ */
+ AddOneByCursor(cursor);
+ cursor.Refresh();
+
+ // Duplicate the cursor at the same position.
+ dupCursor = cursor.Duplicate(true);
+
+ // Overwrite the record.
+ dupCursor.Overwrite(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("newdata")));
+
+ // Confirm that the original data doesn't exist.
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data")));
+ Assert.IsFalse(dupCursor.Move(pair, true));
+
+ dupCursor.Close();
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestInsertToLoc()
+ {
+ HashDatabase db;
+ HashDatabaseConfig dbConfig;
+ HashCursor cursor;
+ DatabaseEntry data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ string dbFileName;
+
+ testName = "TestInsertToLoc";
+ testHome = testFixtureHome + "/" + testName;
+ dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open database and cursor.
+ dbConfig = new HashDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+
+ /*
+ * The database must use unsorted duplicates to allow
+ * inserting before/after a given record.
+ */
+ dbConfig.Duplicates = DuplicatesPolicy.UNSORTED;
+ db = HashDatabase.Open(dbFileName, dbConfig);
+ cursor = db.Cursor();
+
+ // Add record("key", "data") into database.
+ AddOneByCursor(cursor);
+
+ /*
+ * Insert the new record("key","data1") after the
+ * record("key", "data").
+ */
+ data = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data1"));
+ cursor.Insert(data, Cursor.InsertLocation.AFTER);
+
+ /*
+ * Move the cursor to the record("key", "data") and
+ * confirm that the next record is the one just inserted.
+ */
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data")));
+ Assert.IsTrue(cursor.Move(pair, true));
+ Assert.IsTrue(cursor.MoveNext());
+ Assert.AreEqual(ASCIIEncoding.ASCII.GetBytes("key"),
+ cursor.Current.Key.Data);
+ Assert.AreEqual(ASCIIEncoding.ASCII.GetBytes("data1"),
+ cursor.Current.Value.Data);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ public void GetHashDBAndCursor(string home, string name,
+ out HashDatabase db, out HashCursor cursor)
+ {
+ string dbFileName = home + "/" + name + ".db";
+ HashDatabaseConfig dbConfig = new HashDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ db = HashDatabase.Open(dbFileName, dbConfig);
+ cursor = db.Cursor();
+ }
+
+ public void AddOneByCursor(HashCursor cursor)
+ {
+ DatabaseEntry key, data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ key = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("key"));
+ data = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data"));
+ pair = new KeyValuePair<DatabaseEntry,DatabaseEntry>(key, data);
+ cursor.Add(pair);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/HashDatabaseConfigTest.cs b/db-4.8.30/test/scr037/HashDatabaseConfigTest.cs
new file mode 100644
index 0000000..ef18f8c
--- /dev/null
+++ b/db-4.8.30/test/scr037/HashDatabaseConfigTest.cs
@@ -0,0 +1,85 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class HashDatabaseConfigTest : DatabaseConfigTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "HashDatabaseConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ override public void TestConfigWithoutEnv()
+ {
+ testName = "TestConfigWithoutEnv";
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ HashDatabaseConfig hashConfig = new HashDatabaseConfig();
+ Config(xmlElem, ref hashConfig, true);
+ Confirm(xmlElem, hashConfig, true);
+ }
+
+
+ public static void Confirm(XmlElement xmlElement,
+ HashDatabaseConfig hashDBConfig, bool compulsory)
+ {
+ DatabaseConfig dbConfig = hashDBConfig;
+ Confirm(xmlElement, dbConfig, compulsory);
+
+ // Confirm Hash database specific configuration.
+ Configuration.ConfirmCreatePolicy(xmlElement,
+ "Creation", hashDBConfig.Creation, compulsory);
+ Configuration.ConfirmDuplicatesPolicy(xmlElement,
+ "Duplicates", hashDBConfig.Duplicates, compulsory);
+ Configuration.ConfirmUint(xmlElement, "FillFactor",
+ hashDBConfig.FillFactor, compulsory);
+ Configuration.ConfirmUint(xmlElement, "NumElements",
+ hashDBConfig.TableSize, compulsory);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref HashDatabaseConfig hashDBConfig, bool compulsory)
+ {
+ uint fillFactor = new uint();
+ uint numElements = new uint();
+ DatabaseConfig dbConfig = hashDBConfig;
+ Config(xmlElement, ref dbConfig, compulsory);
+
+ // Configure specific fields/properties of Hash db
+ Configuration.ConfigCreatePolicy(xmlElement,
+ "Creation", ref hashDBConfig.Creation,
+ compulsory);
+ Configuration.ConfigDuplicatesPolicy(xmlElement,
+ "Duplicates", ref hashDBConfig.Duplicates,
+ compulsory);
+ if (Configuration.ConfigUint(xmlElement, "FillFactor",
+ ref fillFactor, compulsory))
+ hashDBConfig.FillFactor = fillFactor;
+ if (Configuration.ConfigUint(xmlElement, "NumElements",
+ ref numElements, compulsory))
+ hashDBConfig.TableSize = numElements;
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/HashDatabaseTest.cs b/db-4.8.30/test/scr037/HashDatabaseTest.cs
new file mode 100644
index 0000000..b53ab62
--- /dev/null
+++ b/db-4.8.30/test/scr037/HashDatabaseTest.cs
@@ -0,0 +1,466 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class HashDatabaseTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "HashDatabaseTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestHashComparison()
+ {
+ testName = "TestHashComparison";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ Configuration.ClearDir(testHome);
+
+ HashDatabaseConfig dbConfig = new HashDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.HashComparison = new EntryComparisonDelegate(EntryComparison);
+ HashDatabase db = HashDatabase.Open(dbFileName,dbConfig);
+ int ret;
+
+ /*
+ * The comparison returns the lowest byte of the first
+ * DBT minus the lowest byte of the second.
+ */
+ ret = db.Compare(new DatabaseEntry(BitConverter.GetBytes(2)),
+ new DatabaseEntry(BitConverter.GetBytes(2)));
+ Assert.AreEqual(0, ret);
+
+ ret = db.Compare(new DatabaseEntry(BitConverter.GetBytes(256)),
+ new DatabaseEntry(BitConverter.GetBytes(1)));
+ Assert.Greater(0, ret);
+
+ db.Close();
+ }
+
+ public int EntryComparison(DatabaseEntry dbt1,
+ DatabaseEntry dbt2)
+ {
+ return dbt1.Data[0] - dbt2.Data[0];
+ }
+
+ [Test]
+ public void TestHashFunction()
+ {
+ testName = "TestHashFunction";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ Configuration.ClearDir(testHome);
+
+ HashDatabaseConfig dbConfig = new HashDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.HashFunction = new HashFunctionDelegate(HashFunction);
+ HashDatabase db = HashDatabase.Open(dbFileName, dbConfig);
+
+ // The hash function sets the lowest byte to 0.
+ uint data = db.HashFunction(BitConverter.GetBytes(1));
+ Assert.AreEqual(0, data);
+ db.Close();
+ }
+
+ public uint HashFunction(byte[] data)
+ {
+ data[0] = 0;
+ return BitConverter.ToUInt32(data, 0);
+ }
+
+ [Test]
+ public void TestOpenExistingHashDB()
+ {
+ testName = "TestOpenExistingHashDB";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ HashDatabaseConfig hashConfig =
+ new HashDatabaseConfig();
+ hashConfig.Creation = CreatePolicy.ALWAYS;
+ HashDatabase hashDB = HashDatabase.Open(dbFileName, hashConfig);
+ hashDB.Close();
+
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ Database db = Database.Open(dbFileName, dbConfig);
+ Assert.AreEqual(db.Type, DatabaseType.HASH);
+ db.Close();
+ }
+
+ [Test]
+ public void TestOpenNewHashDB()
+ {
+ testName = "TestOpenNewHashDB";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ XmlElement xmlElem = Configuration.TestSetUp(testFixtureName, testName);
+ HashDatabaseConfig hashConfig = new HashDatabaseConfig();
+ HashDatabaseConfigTest.Config(xmlElem, ref hashConfig, true);
+ HashDatabase hashDB = HashDatabase.Open(dbFileName, hashConfig);
+ Confirm(xmlElem, hashDB, true);
+ hashDB.Close();
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestPutNoDuplicateWithUnsortedDuplicate()
+ {
+ testName = "TestPutNoDuplicateWithUnsortedDuplicate";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ HashDatabaseConfig hashConfig = new HashDatabaseConfig();
+ hashConfig.Creation = CreatePolicy.ALWAYS;
+ hashConfig.Duplicates = DuplicatesPolicy.UNSORTED;
+ hashConfig.ErrorPrefix = testName;
+
+ HashDatabase hashDB = HashDatabase.Open(dbFileName, hashConfig);
+ DatabaseEntry key, data;
+ key = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("1"));
+ data = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("1"));
+
+ try
+ {
+ hashDB.PutNoDuplicate(key, data);
+ }
+ catch (DatabaseException)
+ {
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ hashDB.Close();
+ }
+ }
+
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestKeyExistException()
+ {
+ testName = "TestKeyExistException";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ HashDatabaseConfig hashConfig = new HashDatabaseConfig();
+ hashConfig.Creation = CreatePolicy.ALWAYS;
+ hashConfig.Duplicates = DuplicatesPolicy.SORTED;
+ HashDatabase hashDB = HashDatabase.Open(dbFileName, hashConfig);
+
+ // Put the same record into db twice.
+ DatabaseEntry key, data;
+ key = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("1"));
+ data = new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("1"));
+ try
+ {
+ hashDB.PutNoDuplicate(key, data);
+ hashDB.PutNoDuplicate(key, data);
+ }
+ catch (KeyExistException)
+ {
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ hashDB.Close();
+ }
+ }
+
+ [Test]
+ public void TestPutNoDuplicate()
+ {
+ testName = "TestPutNoDuplicate";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ HashDatabaseConfig hashConfig =
+ new HashDatabaseConfig();
+ hashConfig.Creation = CreatePolicy.ALWAYS;
+ hashConfig.Duplicates = DuplicatesPolicy.SORTED;
+ hashConfig.TableSize = 20;
+ HashDatabase hashDB = HashDatabase.Open(dbFileName, hashConfig);
+
+ DatabaseEntry key, data;
+ for (int i = 1; i <= 10; i++)
+ {
+ key = new DatabaseEntry(BitConverter.GetBytes(i));
+ data = new DatabaseEntry(BitConverter.GetBytes(i));
+ hashDB.PutNoDuplicate(key, data);
+ }
+
+ Assert.IsTrue(hashDB.Exists(
+ new DatabaseEntry(BitConverter.GetBytes((int)5))));
+
+ hashDB.Close();
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestPutNoDuplicateWithTxn()
+ {
+ testName = "TestPutNoDuplicateWithTxn";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseLogging = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ // Open a hash database within a transaction.
+ Transaction txn = env.BeginTransaction();
+ HashDatabaseConfig dbConfig = new HashDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Duplicates = DuplicatesPolicy.SORTED;
+ dbConfig.Env = env;
+ HashDatabase db = HashDatabase.Open(testName + ".db", dbConfig, txn);
+
+ DatabaseEntry dbt = new DatabaseEntry(BitConverter.GetBytes((int)100));
+ db.PutNoDuplicate(dbt, dbt, txn);
+ try
+ {
+ db.PutNoDuplicate(dbt, dbt, txn);
+ }
+ catch (KeyExistException)
+ {
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ // Close all.
+ db.Close();
+ txn.Commit();
+ env.Close();
+ }
+ }
+
+ [Test]
+ public void TestStats()
+ {
+ testName = "TestStats";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ Configuration.ClearDir(testHome);
+
+ HashDatabaseConfig dbConfig = new HashDatabaseConfig();
+ ConfigCase1(dbConfig);
+ HashDatabase db = HashDatabase.Open(dbFileName, dbConfig);
+
+ HashStats stats = db.Stats();
+ HashStats fastStats = db.FastStats();
+ ConfirmStatsPart1Case1(stats);
+ ConfirmStatsPart1Case1(fastStats);
+
+ // Put 100 records into the database.
+ PutRecordCase1(db, null);
+
+ stats = db.Stats();
+ ConfirmStatsPart2Case1(stats);
+
+ // Delete some data to get some free pages.
+ byte[] bigArray = new byte[262144];
+ db.Delete(new DatabaseEntry(bigArray));
+ stats = db.Stats();
+ ConfirmStatsPart3Case1(stats);
+
+ db.Close();
+ }
+
+ [Test]
+ public void TestStatsInTxn()
+ {
+ testName = "TestStatsInTxn";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ StatsInTxn(testHome, testName, false);
+ }
+
+ [Test]
+ public void TestStatsWithIsolation()
+ {
+ testName = "TestStatsWithIsolation";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ StatsInTxn(testHome, testName, true);
+ }
+
+ public void StatsInTxn(string home, string name, bool ifIsolation)
+ {
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ EnvConfigCase1(envConfig);
+ DatabaseEnvironment env = DatabaseEnvironment.Open(home, envConfig);
+
+ Transaction openTxn = env.BeginTransaction();
+ HashDatabaseConfig dbConfig = new HashDatabaseConfig();
+ ConfigCase1(dbConfig);
+ dbConfig.Env = env;
+ HashDatabase db = HashDatabase.Open(name + ".db", dbConfig, openTxn);
+ openTxn.Commit();
+
+ Transaction statsTxn = env.BeginTransaction();
+ HashStats stats;
+ HashStats fastStats;
+ if (ifIsolation == false)
+ {
+ stats = db.Stats(statsTxn);
+				fastStats = db.FastStats(statsTxn);
+ }
+ else
+ {
+ stats = db.Stats(statsTxn, Isolation.DEGREE_ONE);
+				fastStats = db.FastStats(statsTxn, Isolation.DEGREE_ONE);
+ }
+
+ ConfirmStatsPart1Case1(stats);
+
+ // Put 100 records into the database.
+ PutRecordCase1(db, statsTxn);
+
+ if (ifIsolation == false)
+ stats = db.Stats(statsTxn);
+ else
+ stats = db.Stats(statsTxn, Isolation.DEGREE_TWO);
+ ConfirmStatsPart2Case1(stats);
+
+ // Delete some data to get some free pages.
+ byte[] bigArray = new byte[262144];
+ db.Delete(new DatabaseEntry(bigArray), statsTxn);
+ if (ifIsolation == false)
+ stats = db.Stats(statsTxn);
+ else
+ stats = db.Stats(statsTxn, Isolation.DEGREE_THREE);
+ ConfirmStatsPart3Case1(stats);
+
+ statsTxn.Commit();
+ db.Close();
+ env.Close();
+ }
+
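+		// Shared helper: a transactional environment configuration
+		// with logging and a memory pool, used by the stats tests.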
+ public void EnvConfigCase1(DatabaseEnvironmentConfig cfg)
+ {
+ cfg.Create = true;
+ cfg.UseTxns = true;
+ cfg.UseMPool = true;
+ cfg.UseLogging = true;
+ }
+
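+		// Shared helper: a hash database configuration with unsorted
+		// duplicates, fill factor 10, table size 20 and 4KB pages.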
+ public void ConfigCase1(HashDatabaseConfig dbConfig)
+ {
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Duplicates = DuplicatesPolicy.UNSORTED;
+ dbConfig.FillFactor = 10;
+ dbConfig.TableSize = 20;
+ dbConfig.PageSize = 4096;
+ }
+
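+		// Shared helper: insert 50 small records with distinct keys,
+		// then 50 duplicate records under a single 256KB key/value.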
+ public void PutRecordCase1(HashDatabase db, Transaction txn)
+ {
+ byte[] bigArray = new byte[262144];
+ for (int i = 0; i < 50; i++)
+ {
+ if (txn == null)
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)));
+ else
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)), txn);
+ }
+ for (int i = 50; i < 100; i++)
+ {
+ if (txn == null)
+ db.Put(new DatabaseEntry(bigArray),
+ new DatabaseEntry(bigArray));
+ else
+ db.Put(new DatabaseEntry(bigArray),
+ new DatabaseEntry(bigArray), txn);
+ }
+ }
+
+ public void ConfirmStatsPart1Case1(HashStats stats)
+ {
+ Assert.AreEqual(10, stats.FillFactor);
+ Assert.AreEqual(4096, stats.PageSize);
+ Assert.AreNotEqual(0, stats.Version);
+ }
+
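+		// After PutRecordCase1 the database holds 51 distinct keys and
+		// 100 data items (50 of them duplicates of the big key), which
+		// is what the nKeys and nData assertions below expect.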
+ public void ConfirmStatsPart2Case1(HashStats stats)
+ {
+ Assert.AreNotEqual(0, stats.BigPages);
+ Assert.AreNotEqual(0, stats.BigPagesFreeBytes);
+ Assert.AreNotEqual(0, stats.BucketPagesFreeBytes);
+ Assert.AreNotEqual(0, stats.DuplicatePages);
+ Assert.AreNotEqual(0, stats.DuplicatePagesFreeBytes);
+ Assert.AreNotEqual(0, stats.MagicNumber);
+ Assert.AreNotEqual(0, stats.MetadataFlags);
+ Assert.AreEqual(100, stats.nData);
+ Assert.AreNotEqual(0, stats.nHashBuckets);
+ Assert.AreEqual(51, stats.nKeys);
+ Assert.AreNotEqual(0, stats.nPages);
+ Assert.AreEqual(0, stats.OverflowPages);
+ Assert.AreEqual(0, stats.OverflowPagesFreeBytes);
+ }
+
+ public void ConfirmStatsPart3Case1(HashStats stats)
+ {
+ Assert.AreNotEqual(0, stats.FreePages);
+ }
+
+ public static void Confirm(XmlElement xmlElem,
+ HashDatabase hashDB, bool compulsory)
+ {
+ DatabaseTest.Confirm(xmlElem, hashDB, compulsory);
+ Configuration.ConfirmCreatePolicy(xmlElem,
+ "Creation", hashDB.Creation, compulsory);
+ Configuration.ConfirmDuplicatesPolicy(xmlElem,
+ "Duplicates", hashDB.Duplicates, compulsory);
+ Configuration.ConfirmUint(xmlElem,
+ "FillFactor", hashDB.FillFactor, compulsory);
+ Configuration.ConfirmUint(xmlElem,
+ "NumElements", hashDB.TableSize * hashDB.FillFactor,
+ compulsory);
+ Assert.AreEqual(DatabaseType.HASH, hashDB.Type);
+ string type = hashDB.Type.ToString();
+ Assert.IsNotNull(type);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/JoinCursorTest.cs b/db-4.8.30/test/scr037/JoinCursorTest.cs
new file mode 100644
index 0000000..15f8ade
--- /dev/null
+++ b/db-4.8.30/test/scr037/JoinCursorTest.cs
@@ -0,0 +1,204 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class JoinCursorTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "JoinCursorTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestJoin()
+ {
+ testName = "TestJoin";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string secFileName1 = testHome + "/" + "sec_" + testName + "1.db";
+ string secFileName2 = testHome + "/" + "sec_" + testName + "2.db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a primary database.
+ BTreeDatabaseConfig dbCfg = new BTreeDatabaseConfig();
+ dbCfg.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase db = BTreeDatabase.Open(dbFileName, dbCfg);
+
+			/*
+			 * Open two secondary databases on the primary
+			 * and a cursor on each of them.
+			 */
+ SecondaryBTreeDatabase secDB1, secDB2;
+ SecondaryCursor[] cursors = new SecondaryCursor[2];
+ GetSecCursor(db, secFileName1,
+ new SecondaryKeyGenDelegate(KeyGenOnBigByte),
+ out secDB1, out cursors[0], false, null);
+ GetSecCursor(db, secFileName2,
+ new SecondaryKeyGenDelegate(KeyGenOnLittleByte),
+ out secDB2, out cursors[1], true, null);
+
+ // Get join cursor.
+ JoinCursor joinCursor = db.Join(cursors, true);
+
+ // Close all.
+ joinCursor.Close();
+ cursors[0].Close();
+ cursors[1].Close();
+ secDB1.Close();
+ secDB2.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveJoinCursor()
+ {
+ testName = "TestMoveJoinCursor";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string secFileName1 = testHome + "/" + "sec_" + testName + "1.db";
+ string secFileName2 = testHome + "/" + "sec_" + testName + "2.db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a primary database.
+ BTreeDatabaseConfig dbCfg = new BTreeDatabaseConfig();
+ dbCfg.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase db = BTreeDatabase.Open(dbFileName, dbCfg);
+
+			/*
+			 * Open one secondary database keyed on the highest
+			 * byte of the primary data and another keyed on the
+			 * lowest byte.
+			 */
+ SecondaryBTreeDatabase secDB1, secDB2;
+ SecondaryCursor[] cursors = new SecondaryCursor[2];
+ byte[] byteValue = new byte[1];
+
+ byteValue[0] = 0;
+ GetSecCursor(db, secFileName1,
+ new SecondaryKeyGenDelegate(KeyGenOnBigByte),
+ out secDB1, out cursors[0], false,
+ new DatabaseEntry(byteValue));
+
+ byteValue[0] = 1;
+ GetSecCursor(db, secFileName2,
+ new SecondaryKeyGenDelegate(KeyGenOnLittleByte),
+ out secDB2, out cursors[1], true,
+ new DatabaseEntry(byteValue));
+
+ // Get join cursor.
+ JoinCursor joinCursor = db.Join(cursors, true);
+
+			/*
+			 * MoveNextItem retrieves only the key, not the data
+			 * value, so Current.Value stays empty.
+			 */
+ Assert.IsTrue(joinCursor.MoveNextItem());
+ Assert.AreEqual(0, joinCursor.Current.Key.Data[
+ joinCursor.Current.Key.Data.Length - 1]);
+ Assert.AreEqual(1, joinCursor.Current.Key.Data[0]);
+ Assert.IsNull(joinCursor.Current.Value.Data);
+
+ // Iterate on join cursor.
+ foreach (KeyValuePair<DatabaseEntry, DatabaseEntry> pair in joinCursor)
+ {
+				/*
+				 * Confirm that every key returned by the join cursor
+				 * has 0 in its highest byte and 1 in its lowest byte.
+				 */
+ Assert.AreEqual(0, pair.Key.Data[pair.Key.Data.Length - 1]);
+ Assert.AreEqual(1, pair.Key.Data[0]);
+ }
+
+ Assert.IsFalse(joinCursor.MoveNext());
+
+ // Close all.
+ joinCursor.Close();
+ cursors[0].Close();
+ cursors[1].Close();
+ secDB1.Close();
+ secDB2.Close();
+ db.Close();
+ }
+
+ public void GetSecCursor(BTreeDatabase db,
+ string secFileName, SecondaryKeyGenDelegate keyGen,
+ out SecondaryBTreeDatabase secDB,
+ out SecondaryCursor cursor, bool ifCfg,
+ DatabaseEntry data)
+ {
+ // Open secondary database.
+ SecondaryBTreeDatabaseConfig secCfg =
+ new SecondaryBTreeDatabaseConfig(db, keyGen);
+ secCfg.Creation = CreatePolicy.IF_NEEDED;
+ secCfg.Duplicates = DuplicatesPolicy.SORTED;
+ secDB = SecondaryBTreeDatabase.Open(secFileName, secCfg);
+
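+			/*
+			 * Insert a few integers chosen so that (on a little-endian
+			 * platform) their low bytes are 0 or 1 while their highest
+			 * byte is always 0; the key generators below index exactly
+			 * those two bytes.
+			 */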
+ int[] intArray = new int[4];
+ intArray[0] = 0;
+ intArray[1] = 1;
+ intArray[2] = 2049;
+ intArray[3] = 65537;
+ for (int i = 0; i < 4; i++)
+ {
+ DatabaseEntry record = new DatabaseEntry(
+ BitConverter.GetBytes(intArray[i]));
+ db.Put(record, record);
+ }
+
+ // Get secondary cursor on the secondary database.
+ if (ifCfg == false)
+ cursor = secDB.SecondaryCursor();
+ else
+ cursor = secDB.SecondaryCursor(new CursorConfig());
+
+ // Position the cursor.
+ if (data != null)
+ Assert.IsTrue(cursor.Move(data, true));
+ }
+
+ public DatabaseEntry KeyGenOnLittleByte(
+ DatabaseEntry key, DatabaseEntry data)
+ {
+ byte[] byteArr = new byte[1];
+ byteArr[0] = data.Data[0];
+ DatabaseEntry dbtGen = new DatabaseEntry(byteArr);
+ return dbtGen;
+ }
+
+ public DatabaseEntry KeyGenOnBigByte(
+ DatabaseEntry key, DatabaseEntry data)
+ {
+ byte[] byteArr = new byte[1];
+ byteArr[0] = data.Data[data.Data.Length - 1];
+ DatabaseEntry dbtGen = new DatabaseEntry(byteArr);
+ return dbtGen;
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/LockTest.cs b/db-4.8.30/test/scr037/LockTest.cs
new file mode 100644
index 0000000..bc4e3ad
--- /dev/null
+++ b/db-4.8.30/test/scr037/LockTest.cs
@@ -0,0 +1,116 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest {
+ [TestFixture]
+ public class LockTest {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests() {
+ testFixtureName = "LockTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+			/*
+			 * Delete the existing test output directory and files for
+			 * the current test fixture, then create a new one.
+			 */
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestLockStats() {
+			testName = "TestLockStats";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ // Configure locking subsystem.
+ LockingConfig lkConfig = new LockingConfig();
+ lkConfig.MaxLockers = 60;
+ lkConfig.MaxLocks = 50;
+ lkConfig.MaxObjects = 70;
+ lkConfig.Partitions = 20;
+
+ // Configure and open environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.MPoolSystemCfg = new MPoolConfig();
+ envConfig.Create = true;
+ envConfig.LockSystemCfg = lkConfig;
+ envConfig.UseLocking = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ envConfig.ErrorPrefix = testName;
+ envConfig.NoLocking = false;
+ envConfig.LockTimeout = 1000;
+ envConfig.TxnTimeout = 2000;
+ envConfig.MPoolSystemCfg.CacheSize = new CacheInfo(0, 10485760, 1);
+ DatabaseEnvironment env =
+ DatabaseEnvironment.Open(testHome, envConfig);
+
+ // Get and confirm locking subsystem statistics.
+ LockStats stats = env.LockingSystemStats();
+ env.PrintLockingSystemStats(true, true);
+ Assert.AreEqual(1000, stats.LockTimeoutLength);
+ Assert.AreEqual(60, stats.MaxLockersInTable);
+ Assert.AreEqual(50, stats.MaxLocksInTable);
+ Assert.AreEqual(70, stats.MaxObjectsInTable);
+ Assert.AreNotEqual(0, stats.MaxUnusedID);
+ Assert.AreEqual(20, stats.nPartitions);
+ Assert.AreNotEqual(0, stats.RegionNoWait);
+ Assert.AreNotEqual(0, stats.RegionSize);
+ Assert.AreEqual(0, stats.RegionWait);
+ Assert.AreEqual(2000, stats.TxnTimeoutLength);
+
+ env.PrintLockingSystemStats();
+
+ env.Close();
+ }
+
+ public static void LockingEnvSetUp(string testHome,
+ string testName, out DatabaseEnvironment env,
+ uint maxLock, uint maxLocker, uint maxObject,
+ uint partition) {
+ // Configure env and locking subsystem.
+ LockingConfig lkConfig = new LockingConfig();
+			/*
+			 * If a maximum number of locks/lockers/objects or a
+			 * partition count is given, set it on the LockingConfig;
+			 * otherwise leave the default.
+			 */
+ if (maxLock != 0)
+ lkConfig.MaxLocks = maxLock;
+ if (maxLocker != 0)
+ lkConfig.MaxLockers = maxLocker;
+ if (maxObject != 0)
+ lkConfig.MaxObjects = maxObject;
+ if (partition != 0)
+ lkConfig.Partitions = partition;
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.LockSystemCfg = lkConfig;
+ envConfig.UseLocking = true;
+ envConfig.ErrorPrefix = testName;
+
+ env = DatabaseEnvironment.Open(testHome, envConfig);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/LockingConfigTest.cs b/db-4.8.30/test/scr037/LockingConfigTest.cs
new file mode 100644
index 0000000..82cf630
--- /dev/null
+++ b/db-4.8.30/test/scr037/LockingConfigTest.cs
@@ -0,0 +1,162 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class LockingConfigTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "LockingConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestConfig()
+ {
+ testName = "TestConfig";
+
+ // Configure the fields/properties.
+ LockingConfig lockingConfig = new LockingConfig();
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+
+ // Configure LockingConfig
+ Config(xmlElem, ref lockingConfig, true);
+
+ // Confirm LockingConfig
+ Confirm(xmlElem, lockingConfig, true);
+ }
+
+ [Test]
+ public void TestMaxLock() {
+ testName = "TestMaxLock";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+			DatabaseEnvironment env;
+			uint maxLocks = 1;
+
+			/*
+			 * Initialize an environment using the locking subsystem.
+			 * The maximum number of locks must not be smaller than the
+			 * number of lock partitions, so the partition count is set
+			 * to the same value as MaxLocks.
+			 */
+ LockTest.LockingEnvSetUp(testHome, testName, out env,
+ maxLocks, 0, 0, maxLocks);
+ Assert.AreEqual(maxLocks, env.MaxLocks);
+
+ env.Close();
+ }
+
+ [Test]
+ public void TestMaxLocker() {
+ testName = "TestMaxLocker";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironment env;
+ uint maxLockers;
+
+ maxLockers = 1;
+ LockTest.LockingEnvSetUp(testHome, testName, out env,
+ 0, maxLockers, 0, 0);
+ Assert.AreEqual(maxLockers, env.MaxLockers);
+ env.Close();
+ }
+
+ [Test]
+ public void TestMaxObjects() {
+ testName = "TestMaxObjects";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironment env;
+ uint maxObjects;
+
+ maxObjects = 1;
+
+			/*
+			 * Initialize an environment using the locking subsystem.
+			 * The maximum number of lock objects must not be smaller
+			 * than the number of lock partitions, so the partition
+			 * count is set to the same value as MaxObjects.
+			 */
+ LockTest.LockingEnvSetUp(testHome, testName, out env, 0, 0,
+ maxObjects, maxObjects);
+ Assert.AreEqual(maxObjects, env.MaxObjects);
+ env.Close();
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ LockingConfig lockingConfig, bool compulsory)
+ {
+ Configuration.ConfirmByteMatrix(xmlElement, "Conflicts",
+ lockingConfig.Conflicts, compulsory);
+ Configuration.ConfirmDeadlockPolicy(xmlElement,
+ "DeadlockResolution",
+ lockingConfig.DeadlockResolution, compulsory);
+ Configuration.ConfirmUint(xmlElement, "MaxLockers",
+ lockingConfig.MaxLockers, compulsory);
+ Configuration.ConfirmUint(xmlElement, "MaxLocks",
+ lockingConfig.MaxLocks, compulsory);
+ Configuration.ConfirmUint(xmlElement, "MaxObjects",
+ lockingConfig.MaxObjects, compulsory);
+ Configuration.ConfirmUint(xmlElement, "Partitions",
+ lockingConfig.Partitions, compulsory);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref LockingConfig lockingConfig, bool compulsory)
+ {
+ byte[,] matrix = new byte[6, 6];
+ uint value = new uint();
+
+ if (Configuration.ConfigByteMatrix(xmlElement, "Conflicts",
+ ref matrix, compulsory) == true)
+ lockingConfig.Conflicts = matrix;
+
+ Configuration.ConfigDeadlockPolicy(xmlElement, "DeadlockResolution",
+ ref lockingConfig.DeadlockResolution, compulsory);
+ if (Configuration.ConfigUint(xmlElement, "MaxLockers",
+ ref value, compulsory))
+ lockingConfig.MaxLockers = value;
+ if (Configuration.ConfigUint(xmlElement, "MaxLocks",
+ ref value, compulsory))
+ lockingConfig.MaxLocks = value;
+ if (Configuration.ConfigUint(xmlElement, "MaxObjects",
+ ref value, compulsory))
+ lockingConfig.MaxObjects = value;
+ if (Configuration.ConfigUint(xmlElement, "Partitions",
+ ref value, compulsory))
+ lockingConfig.Partitions = value;
+ }
+
+ }
+}
diff --git a/db-4.8.30/test/scr037/LogConfigTest.cs b/db-4.8.30/test/scr037/LogConfigTest.cs
new file mode 100644
index 0000000..a49196f
--- /dev/null
+++ b/db-4.8.30/test/scr037/LogConfigTest.cs
@@ -0,0 +1,297 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class LogConfigTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "LogConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestConfig()
+ {
+ testName = "TestConfig";
+ /*
+ * Configure the fields/properties and see if
+ * they are updated successfully.
+ */
+ LogConfig logConfig = new LogConfig();
+ XmlElement xmlElem = Configuration.TestSetUp(testFixtureName, testName);
+ Config(xmlElem, ref logConfig, true);
+ Confirm(xmlElem, logConfig, true);
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestFullLogBufferException()
+ {
+ testName = "TestFullLogBufferException";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+			// Open an environment with the log subsystem configured.
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.Create = true;
+ cfg.TxnNoSync = true;
+ cfg.UseTxns = true;
+ cfg.UseLocking = true;
+ cfg.UseMPool = true;
+ cfg.UseLogging = true;
+ cfg.LogSystemCfg = new LogConfig();
+ cfg.LogSystemCfg.AutoRemove = false;
+ cfg.LogSystemCfg.BufferSize = 409600;
+ cfg.LogSystemCfg.MaxFileSize = 10480;
+ cfg.LogSystemCfg.NoBuffer = false;
+ cfg.LogSystemCfg.ZeroOnCreate = true;
+ cfg.LogSystemCfg.InMemory = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(testHome, cfg);
+
+ BTreeDatabase db;
+ try
+ {
+ Transaction openTxn = env.BeginTransaction();
+ try
+ {
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ db = BTreeDatabase.Open(testName + ".db", dbConfig, openTxn);
+ openTxn.Commit();
+ }
+ catch (DatabaseException e)
+ {
+ openTxn.Abort();
+ throw e;
+ }
+
+ Transaction writeTxn = env.BeginTransaction();
+ try
+ {
+					/*
+					 * Writing 10 large records to a database in an environment
+					 * with in-memory logging should throw FullLogBufferException,
+					 * since the logged data exceeds the log buffer size.
+					 */
+ byte[] byteArr = new byte[204800];
+ for (int i = 0; i < 10; i++)
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(byteArr), writeTxn);
+ writeTxn.Commit();
+ }
+ catch (Exception e)
+ {
+ writeTxn.Abort();
+ throw e;
+ }
+ finally
+ {
+ db.Close(true);
+ }
+ }
+ catch (FullLogBufferException e)
+ {
+ Assert.AreEqual(ErrorCodes.DB_LOG_BUFFER_FULL, e.ErrorCode);
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ env.Close();
+ }
+ }
+
+ [Test]
+ public void TestLoggingSystemStats()
+ {
+ testName = "TestLoggingSystemStats";
+ testHome = testFixtureHome + "/" + testName;
+ string logDir = "./";
+
+ Configuration.ClearDir(testHome);
+ Directory.CreateDirectory(testHome + "/" + logDir);
+
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.Create = true;
+ cfg.UseTxns = true;
+ cfg.AutoCommit = true;
+ cfg.UseLocking = true;
+ cfg.UseMPool = true;
+ cfg.UseLogging = true;
+ cfg.MPoolSystemCfg = new MPoolConfig();
+ cfg.MPoolSystemCfg.CacheSize = new CacheInfo(0, 1048576, 1);
+
+ cfg.LogSystemCfg = new LogConfig();
+ cfg.LogSystemCfg.AutoRemove = false;
+ cfg.LogSystemCfg.BufferSize = 10240;
+ cfg.LogSystemCfg.Dir = logDir;
+ cfg.LogSystemCfg.FileMode = 755;
+ cfg.LogSystemCfg.ForceSync = true;
+ cfg.LogSystemCfg.InMemory = false;
+ cfg.LogSystemCfg.MaxFileSize = 1048576;
+ cfg.LogSystemCfg.NoBuffer = false;
+ cfg.LogSystemCfg.RegionSize = 204800;
+ cfg.LogSystemCfg.ZeroOnCreate = true;
+
+ DatabaseEnvironment env = DatabaseEnvironment.Open(testHome, cfg);
+
+ LogStats stats = env.LoggingSystemStats();
+ env.PrintLoggingSystemStats();
+ Assert.AreEqual(10240, stats.BufferSize);
+ Assert.AreEqual(1, stats.CurrentFile);
+ Assert.AreNotEqual(0, stats.CurrentOffset);
+ Assert.AreEqual(1048576, stats.FileSize);
+ Assert.AreNotEqual(0, stats.MagicNumber);
+ Assert.AreNotEqual(0, stats.PermissionsMode);
+ Assert.AreEqual(1, stats.Records);
+ Assert.AreNotEqual(0, stats.RegionLockNoWait);
+ Assert.LessOrEqual(204800, stats.RegionSize);
+ Assert.AreNotEqual(0, stats.Version);
+
+ Transaction openTxn = env.BeginTransaction();
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ BTreeDatabase db = BTreeDatabase.Open(testName + ".db", dbConfig, openTxn);
+ openTxn.Commit();
+
+ Transaction writeTxn = env.BeginTransaction();
+ byte[] byteArr = new byte[1024];
+ for (int i = 0; i < 1000; i++)
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(byteArr), writeTxn);
+ writeTxn.Commit();
+
+ stats = env.LoggingSystemStats();
+ Assert.AreNotEqual(0, stats.Bytes);
+ Assert.AreNotEqual(0, stats.BytesSinceCheckpoint);
+ Assert.AreNotEqual(0, stats.DiskFileNumber);
+ Assert.AreNotEqual(0, stats.DiskOffset);
+ Assert.AreNotEqual(0, stats.MaxCommitsPerFlush);
+ Assert.AreNotEqual(0, stats.MBytes);
+ Assert.AreNotEqual(0, stats.MBytesSinceCheckpoint);
+ Assert.AreNotEqual(0, stats.MinCommitsPerFlush);
+ Assert.AreNotEqual(0, stats.OverflowWrites);
+ Assert.AreNotEqual(0, stats.Syncs);
+ Assert.AreNotEqual(0, stats.Writes);
+ Assert.AreEqual(0, stats.Reads);
+ Assert.AreEqual(0, stats.RegionLockWait);
+
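+			// Passing true clears the statistics after they are read,
+			// so the next call should report zeroed counters.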
+ stats = env.LoggingSystemStats(true);
+ stats = env.LoggingSystemStats();
+ Assert.AreEqual(0, stats.Bytes);
+ Assert.AreEqual(0, stats.BytesSinceCheckpoint);
+ Assert.AreEqual(0, stats.MaxCommitsPerFlush);
+ Assert.AreEqual(0, stats.MBytes);
+ Assert.AreEqual(0, stats.MBytesSinceCheckpoint);
+ Assert.AreEqual(0, stats.MinCommitsPerFlush);
+ Assert.AreEqual(0, stats.OverflowWrites);
+ Assert.AreEqual(0, stats.Syncs);
+ Assert.AreEqual(0, stats.Writes);
+ Assert.AreEqual(0, stats.Reads);
+
+ env.PrintLoggingSystemStats(true, true);
+
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestLsn()
+ {
+ testName = "TestLsn";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ LSN lsn = new LSN(12, 411);
+ Assert.AreEqual(12, lsn.LogFileNumber);
+ Assert.AreEqual(411, lsn.Offset);
+
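+			// LSN.Compare orders LSNs by file number, then by offset;
+			// a negative result means the first LSN is the earlier one.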
+ LSN newLsn = new LSN(15, 410);
+ Assert.AreEqual(0, LSN.Compare(lsn, lsn));
+ Assert.Greater(0, LSN.Compare(lsn, newLsn));
+ }
+
+ public static void Confirm(XmlElement
+ xmlElement, LogConfig logConfig, bool compulsory)
+ {
+ Configuration.ConfirmBool(xmlElement, "AutoRemove",
+ logConfig.AutoRemove, compulsory);
+ Configuration.ConfirmUint(xmlElement, "BufferSize",
+ logConfig.BufferSize, compulsory);
+ Configuration.ConfirmString(xmlElement, "Dir",
+ logConfig.Dir, compulsory);
+ Configuration.ConfirmInt(xmlElement, "FileMode",
+ logConfig.FileMode, compulsory);
+ Configuration.ConfirmBool(xmlElement, "ForceSync",
+ logConfig.ForceSync, compulsory);
+ Configuration.ConfirmBool(xmlElement, "InMemory",
+ logConfig.InMemory, compulsory);
+ Configuration.ConfirmUint(xmlElement, "MaxFileSize",
+ logConfig.MaxFileSize, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoBuffer",
+ logConfig.NoBuffer, compulsory);
+ Configuration.ConfirmUint(xmlElement, "RegionSize",
+ logConfig.RegionSize, compulsory);
+ Configuration.ConfirmBool(xmlElement, "ZeroOnCreate",
+ logConfig.ZeroOnCreate, compulsory);
+ }
+
+ public static void Config(XmlElement
+ xmlElement, ref LogConfig logConfig, bool compulsory)
+ {
+ uint uintValue = new uint();
+ int intValue = new int();
+
+ Configuration.ConfigBool(xmlElement, "AutoRemove",
+ ref logConfig.AutoRemove, compulsory);
+ if (Configuration.ConfigUint(xmlElement, "BufferSize",
+ ref uintValue, compulsory))
+ logConfig.BufferSize = uintValue;
+ Configuration.ConfigString(xmlElement, "Dir",
+ ref logConfig.Dir, compulsory);
+ if (Configuration.ConfigInt(xmlElement, "FileMode",
+ ref intValue, compulsory))
+ logConfig.FileMode = intValue;
+ Configuration.ConfigBool(xmlElement, "ForceSync",
+ ref logConfig.ForceSync, compulsory);
+ Configuration.ConfigBool(xmlElement, "InMemory",
+ ref logConfig.InMemory, compulsory);
+ if (Configuration.ConfigUint(xmlElement, "MaxFileSize",
+ ref uintValue, compulsory))
+ logConfig.MaxFileSize = uintValue;
+ Configuration.ConfigBool(xmlElement, "NoBuffer",
+ ref logConfig.NoBuffer, compulsory);
+ if (Configuration.ConfigUint(xmlElement, "RegionSize",
+ ref uintValue, compulsory))
+ logConfig.RegionSize = uintValue;
+ Configuration.ConfigBool(xmlElement, "ZeroOnCreate",
+ ref logConfig.ZeroOnCreate, compulsory);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/LogCursorTest.cs b/db-4.8.30/test/scr037/LogCursorTest.cs
new file mode 100644
index 0000000..32a793a
--- /dev/null
+++ b/db-4.8.30/test/scr037/LogCursorTest.cs
@@ -0,0 +1,321 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class LogCursorTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+ DatabaseEnvironment env;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "LogCursorTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+			/*
+			 * Delete the existing test output directory and files for
+			 * the current test fixture, then create a new one.
+			 */
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestClose()
+ {
+ testName = "TestClose";
+ testHome = testFixtureHome + "/" + testName;
+ DatabaseEnvironment env;
+ LogCursor logCursor;
+ RecnoDatabase db;
+
+ Logging(testHome, testName, out env, out db);
+
+ // Get log cursor to read/write log.
+ logCursor = env.GetLogCursor();
+
+ // Close the log cursor and env.
+ logCursor.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+		public void TestCurrentLSN()
+		{
+			testName = "TestCurrentLSN";
+ testHome = testFixtureHome + "/" + testName;
+ RecnoDatabase db;
+
+ Logging(testHome, testName, out env, out db);
+
+ // Get log cursor to read/write log.
+ LogCursor logCursor = env.GetLogCursor();
+
+			/*
+			 * Move the cursor to the beginning of log file 1, then
+			 * confirm that CurrentLSN reports the position the cursor
+			 * was moved to.
+			 */
+ LSN lsn = new LSN(1, 0);
+ logCursor.Move(lsn);
+ Assert.AreEqual(lsn.LogFileNumber,
+ logCursor.CurrentLSN.LogFileNumber);
+ Assert.AreEqual(lsn.Offset, logCursor.CurrentLSN.Offset);
+
+ // Close all.
+ logCursor.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestCurrentRecord()
+ {
+ testName = "TestCurrentRecord";
+ testHome = testFixtureHome + "/" + testName;
+ DatabaseEnvironment env;
+ RecnoDatabase db;
+
+ Logging(testHome, testName, out env, out db);
+
+ // Get log cursor to read/write log.
+ LogCursor logCursor = env.GetLogCursor();
+
+			/*
+			 * Move the cursor to the beginning of log file 1 and
+			 * confirm that the current record contains data.
+			 */
+ LSN lsn = new LSN(1, 0);
+ logCursor.Move(lsn);
+ Assert.IsNotNull(logCursor.CurrentRecord.Data);
+
+ // Close all.
+ logCursor.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestMove()
+ {
+ testName = "TestMove";
+ testHome = testFixtureHome + "/" + testName;
+ DatabaseEnvironment env;
+ RecnoDatabase db;
+
+ Logging(testHome, testName, out env, out db);
+
+ // Get log cursor to read/write log.
+ LogCursor logCursor = env.GetLogCursor();
+
+ // Move the cursor to specified location in log files.
+ LSN lsn = new LSN(1, 0);
+ Assert.IsTrue(logCursor.Move(lsn));
+
+ // Close all.
+ logCursor.Close();
+ db.Close();
+ env.Close();
+ }
+
+ /*
+ [Test]
+ public void TestMoveFirst()
+ {
+ testName = "TestMoveFirst";
+ testHome = testFixtureHome + "/" + testName;
+ DatabaseEnvironment env;
+ RecnoDatabase db;
+
+ Logging(testHome, testName, out env, out db);
+
+ // Get log cursor to read/write log.
+ LogCursor logCursor = env.GetLogCursor();
+
+ // Move to the first LSN in log file.
+ Assert.IsTrue(logCursor.MoveFirst());
+
+			// Confirm that the offset of the first LSN is 0.
+ Assert.AreEqual(0, logCursor.CurrentLSN.Offset);
+
+ // Close all.
+ logCursor.Close();
+ db.Close();
+ env.Close();
+ }
+ */
+
+ [Test]
+ public void TestMoveLast()
+ {
+ testName = "TestMoveLast";
+ testHome = testFixtureHome + "/" + testName;
+ DatabaseEnvironment env;
+ RecnoDatabase db;
+
+ Logging(testHome, testName, out env, out db);
+
+ // Get log cursor to read/write log.
+ LogCursor logCursor = env.GetLogCursor();
+
+ // Move to the last LSN in log file.
+ Assert.IsTrue(logCursor.MoveLast());
+
+			// The offset of the last LSN should not be 0.
+ Assert.AreNotEqual(0, logCursor.CurrentLSN.Offset);
+
+ // Close all.
+ logCursor.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestMoveNext()
+ {
+ testName = "TestMoveNext";
+ testHome = testFixtureHome + "/" + testName;
+ DatabaseEnvironment env;
+ RecnoDatabase db;
+
+ Logging(testHome, testName, out env, out db);
+
+ // Get log cursor to read/write log.
+ LogCursor logCursor = env.GetLogCursor();
+
+ logCursor.MoveLast();
+ DatabaseEntry curRec = logCursor.CurrentRecord;
+ for (int i = 0; i < 1000; i++)
+ db.Append(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("new data")));
+
+ Assert.IsTrue(logCursor.MoveNext());
+
+ logCursor.MoveNext();
+
+			// Close all.
+ logCursor.Close();
+ db.Close();
+ env.Close();
+ }
+
+
+ [Test]
+ public void TestMovePrev()
+ {
+ testName = "TestMovePrev";
+ testHome = testFixtureHome + "/" + testName;
+			DatabaseEnvironment env;
+			RecnoDatabase db;
+
+ Logging(testHome, testName, out env, out db);
+
+ // Get log cursor to read/write log.
+ LogCursor logCursor = env.GetLogCursor();
+
+			// Move to the last LSN, then step back to the previous one.
+ logCursor.MoveLast();
+ Assert.IsTrue(logCursor.MovePrev());
+
+ // Close all.
+ logCursor.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestRefresh()
+ {
+ testName = "TestRefresh";
+ testHome = testFixtureHome + "/" + testName;
+			DatabaseEnvironment env;
+			RecnoDatabase db;
+
+ Logging(testHome, testName, out env, out db);
+
+ // Get log cursor to read/write log.
+ LogCursor logCursor = env.GetLogCursor();
+
+ // Move the cursor to the last record.
+ logCursor.MoveLast();
+ DatabaseEntry curRec = logCursor.CurrentRecord;
+
+ // Put some new records into database.
+ for (int i = 0; i < 10; i++)
+ db.Append(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("new data")));
+
+ // Get the current record that cursor points to.
+ logCursor.Refresh();
+
+ // It shouldn't be changed.
+ Assert.AreEqual(curRec.Data,
+ logCursor.CurrentRecord.Data);
+
+ // Close all.
+ logCursor.Close();
+ db.Close();
+ env.Close();
+ }
+
+ /*
+ * Open environment, database and write data into database.
+ * Generated log files are put under testHome.
+ */
+ public void Logging(string home, string dbName,
+ out DatabaseEnvironment env, out RecnoDatabase recnoDB)
+ {
+ string dbFileName = dbName + ".db";
+
+ Configuration.ClearDir(home);
+
+ // Open environment with logging subsystem.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseLogging = true;
+ envConfig.LogSystemCfg = new LogConfig();
+ envConfig.LogSystemCfg.FileMode = 755;
+ envConfig.LogSystemCfg.ZeroOnCreate = true;
+ envConfig.UseMPool = true;
+ env = DatabaseEnvironment.Open(home, envConfig);
+
+			/*
+			 * Open a recno database and append 1000 records to
+			 * generate log activity.
+			 */
+ RecnoDatabaseConfig recnoConfig =
+ new RecnoDatabaseConfig();
+ recnoConfig.Creation = CreatePolicy.IF_NEEDED;
+ recnoConfig.Env = env;
+ // The db needs mpool to open.
+ recnoConfig.NoMMap = false;
+ recnoDB = RecnoDatabase.Open(dbFileName,
+ recnoConfig);
+ for (int i = 0; i < 1000; i++)
+ recnoDB.Append(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key")));
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/MPoolConfigTest.cs b/db-4.8.30/test/scr037/MPoolConfigTest.cs
new file mode 100644
index 0000000..e92e4c0
--- /dev/null
+++ b/db-4.8.30/test/scr037/MPoolConfigTest.cs
@@ -0,0 +1,85 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class MPoolConfigTest
+ {
+ private string testFixtureName;
+ private string testName;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "MPoolConfigTest";
+ }
+
+ [Test]
+ public void TestConfig()
+ {
+ testName = "TestConfig";
+
+			// Configure and confirm the mpool subsystem configuration.
+ MPoolConfig mpoolConfig = new MPoolConfig();
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ Config(xmlElem, ref mpoolConfig, true);
+ Confirm(xmlElem, mpoolConfig, true);
+ }
+
+
+ public static void Confirm(XmlElement
+ xmlElement, MPoolConfig mpoolConfig, bool compulsory)
+ {
+ Configuration.ConfirmCacheSize(xmlElement,
+ "CacheSize", mpoolConfig.CacheSize, compulsory);
+ Configuration.ConfirmCacheSize(xmlElement,
+ "MaxCacheSize", mpoolConfig.MaxCacheSize,
+ compulsory);
+ Configuration.ConfirmInt(xmlElement, "MaxOpenFiles",
+ mpoolConfig.MaxOpenFiles, compulsory);
+ Configuration.ConfirmUint(xmlElement,
+ "MMapSize",
+ mpoolConfig.MMapSize, compulsory);
+ Configuration.ConfirmMaxSequentialWrites(xmlElement,
+ "MaxSequentialWrites",
+ mpoolConfig.SequentialWritePause,
+ mpoolConfig.MaxSequentialWrites, compulsory);
+ }
+
+ public static void Config(XmlElement
+ xmlElement, ref MPoolConfig mpoolConfig, bool compulsory)
+ {
+ uint uintValue = new uint();
+ int intValue = new int();
+
+ Configuration.ConfigCacheInfo(xmlElement,
+ "CacheSize", ref mpoolConfig.CacheSize, compulsory);
+ Configuration.ConfigCacheInfo(xmlElement,
+ "MaxCacheSize", ref mpoolConfig.MaxCacheSize,
+ compulsory);
+ if (Configuration.ConfigInt(xmlElement, "MaxOpenFiles",
+ ref intValue, compulsory))
+ mpoolConfig.MaxOpenFiles = intValue;
+ Configuration.ConfigMaxSequentialWrites(
+ xmlElement, "MaxSequentialWrites", mpoolConfig,
+ compulsory);
+ if (Configuration.ConfigUint(xmlElement,
+ "MMapSize", ref uintValue, compulsory))
+ mpoolConfig.MMapSize = uintValue;
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/MutexConfigTest.cs b/db-4.8.30/test/scr037/MutexConfigTest.cs
new file mode 100644
index 0000000..d2d7e5f
--- /dev/null
+++ b/db-4.8.30/test/scr037/MutexConfigTest.cs
@@ -0,0 +1,82 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class MutexConfigTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "MutexConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestConfig()
+ {
+ testName = "TestConfig";
+
+ /*
+ * Configure the fields/properties and see if
+ * they are updated successfully.
+ */
+			MutexConfig mutexConfig = new MutexConfig();
+			XmlElement xmlElem = Configuration.TestSetUp(
+			    testFixtureName, testName);
+			Config(xmlElem, ref mutexConfig, true);
+			Confirm(xmlElem, mutexConfig, true);
+ }
+
+
+ public static void Confirm(XmlElement
+ xmlElement, MutexConfig mutexConfig, bool compulsory)
+ {
+ Configuration.ConfirmUint(xmlElement, "Alignment",
+ mutexConfig.Alignment, compulsory);
+ Configuration.ConfirmUint(xmlElement, "Increment",
+ mutexConfig.Increment, compulsory);
+ Configuration.ConfirmUint(xmlElement, "MaxMutexes",
+ mutexConfig.MaxMutexes, compulsory);
+ Configuration.ConfirmUint(xmlElement,
+ "NumTestAndSetSpins",
+ mutexConfig.NumTestAndSetSpins, compulsory);
+ }
+
+ public static void Config(XmlElement
+ xmlElement, ref MutexConfig mutexConfig, bool compulsory)
+ {
+ uint value = new uint();
+ if (Configuration.ConfigUint(xmlElement, "Alignment",
+ ref value, compulsory))
+ mutexConfig.Alignment = value;
+ if (Configuration.ConfigUint(xmlElement, "Increment",
+ ref value, compulsory))
+ mutexConfig.Increment = value;
+ if (Configuration.ConfigUint(xmlElement, "MaxMutexes",
+ ref value, compulsory))
+ mutexConfig.MaxMutexes = value;
+ if (Configuration.ConfigUint(xmlElement,
+ "NumTestAndSetSpins", ref value, compulsory))
+ mutexConfig.NumTestAndSetSpins = value;
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/MutexTest.cs b/db-4.8.30/test/scr037/MutexTest.cs
new file mode 100644
index 0000000..0c9ddc6
--- /dev/null
+++ b/db-4.8.30/test/scr037/MutexTest.cs
@@ -0,0 +1,126 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class MutexTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ private BTreeDatabase TestDB;
+ private BerkeleyDB.Mutex TestMutex;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "MutexTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestGetAndFreeMutex()
+ {
+ testName = "TestGetAndFreeMutex";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+ BerkeleyDB.Mutex mutex = env.GetMutex(true, true);
+ mutex.Dispose();
+ env.Close();
+ }
+
+ [Test]
+ public void TestLockAndUnlockMutex()
+ {
+ testName = "TestLockAndUnlockMutex";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Open an environment without locking and
+ * deadlock detection.
+ */
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.FreeThreaded = true;
+ envConfig.UseLogging = true;
+ envConfig.Create = true;
+ envConfig.UseMPool = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ // Open a database.
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ TestDB = BTreeDatabase.Open(
+ testName + ".db", dbConfig);
+
+ // Get a mutex which will be used in two threads.
+ TestMutex = env.GetMutex(true, false);
+
+ // Begin two threads to write records into database.
+ Thread mutexThread1 = new Thread(
+ new ThreadStart(MutexThread1));
+ Thread mutexThread2 = new Thread(
+ new ThreadStart(MutexThread2));
+ mutexThread1.Start();
+ mutexThread2.Start();
+ mutexThread1.Join();
+ mutexThread2.Join();
+
+ // Free the mutex.
+ TestMutex.Dispose();
+
+ // Close all.
+ TestDB.Close();
+ env.Close();
+ }
+
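+		// Both worker threads lock the same mutex, so their batches
+		// of 100 puts are serialized rather than interleaved.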
+ public void MutexThread1()
+ {
+ TestMutex.Lock();
+ for (int i = 0; i < 100; i++)
+ TestDB.Put(new DatabaseEntry(
+ BitConverter.GetBytes(i)),
+ new DatabaseEntry(new byte[102400]));
+ TestMutex.Unlock();
+ }
+
+ public void MutexThread2()
+ {
+ TestMutex.Lock();
+ for (int i = 0; i < 100; i++)
+ TestDB.Put(new DatabaseEntry(
+ BitConverter.GetBytes(i)),
+ new DatabaseEntry(new byte[102400]));
+ TestMutex.Unlock();
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/QueueDatabaseConfigTest.cs b/db-4.8.30/test/scr037/QueueDatabaseConfigTest.cs
new file mode 100644
index 0000000..9586602
--- /dev/null
+++ b/db-4.8.30/test/scr037/QueueDatabaseConfigTest.cs
@@ -0,0 +1,88 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class QueueDatabaseConfigTest : DatabaseConfigTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "QueueDatabaseConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ new public void TestConfigWithoutEnv()
+ {
+ testName = "TestConfigWithoutEnv";
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ QueueDatabaseConfig queueDBConfig =
+ new QueueDatabaseConfig();
+ Config(xmlElem, ref queueDBConfig, true);
+ Confirm(xmlElem, queueDBConfig, true);
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ QueueDatabaseConfig queueDBConfig, bool compulsory)
+ {
+ DatabaseConfig dbConfig = queueDBConfig;
+ Confirm(xmlElement, dbConfig, compulsory);
+
+ // Confirm Queue database specific configuration
+ Configuration.ConfirmBool(xmlElement, "ConsumeInOrder",
+ queueDBConfig.ConsumeInOrder, compulsory);
+ Configuration.ConfirmCreatePolicy(xmlElement, "Creation",
+ queueDBConfig.Creation, compulsory);
+ Configuration.ConfirmUint(xmlElement, "Length",
+ queueDBConfig.Length, compulsory);
+ Configuration.ConfirmInt(xmlElement, "PadByte",
+ queueDBConfig.PadByte, compulsory);
+ Configuration.ConfirmUint(xmlElement, "ExtentSize",
+ queueDBConfig.ExtentSize, compulsory);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref QueueDatabaseConfig queueDBConfig, bool compulsory)
+ {
+ uint uintValue = new uint();
+ int intValue = new int();
+ DatabaseConfig dbConfig = queueDBConfig;
+ Config(xmlElement, ref dbConfig, compulsory);
+
+ // Configure specific fields/properties of Queue database
+ Configuration.ConfigBool(xmlElement, "ConsumeInOrder",
+ ref queueDBConfig.ConsumeInOrder, compulsory);
+ Configuration.ConfigCreatePolicy(xmlElement, "Creation",
+ ref queueDBConfig.Creation, compulsory);
+ if (Configuration.ConfigUint(xmlElement, "Length",
+ ref uintValue, compulsory))
+ queueDBConfig.Length = uintValue;
+ if (Configuration.ConfigInt(xmlElement, "PadByte",
+ ref intValue, compulsory))
+ queueDBConfig.PadByte = intValue;
+ if (Configuration.ConfigUint(xmlElement, "ExtentSize",
+ ref uintValue, compulsory))
+ queueDBConfig.ExtentSize = uintValue;
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/QueueDatabaseTest.cs b/db-4.8.30/test/scr037/QueueDatabaseTest.cs
new file mode 100644
index 0000000..4a0b03b
--- /dev/null
+++ b/db-4.8.30/test/scr037/QueueDatabaseTest.cs
@@ -0,0 +1,646 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class QueueDatabaseTest : DatabaseTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "QueueDatabaseTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestAppendWithoutTxn()
+ {
+ testName = "TestAppendWithoutTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string queueDBFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ QueueDatabaseConfig queueConfig = new QueueDatabaseConfig();
+ queueConfig.Creation = CreatePolicy.ALWAYS;
+ queueConfig.Length = 1000;
+ QueueDatabase queueDB = QueueDatabase.Open(
+ queueDBFileName, queueConfig);
+
+ byte[] byteArr = new byte[4];
+ byteArr = BitConverter.GetBytes((int)1);
+ DatabaseEntry data = new DatabaseEntry(byteArr);
+ uint recno = queueDB.Append(data);
+
+ // Confirm that the recno is larger than 0.
+ Assert.AreNotEqual(0, recno);
+
+ // Confirm that the record exists in the database.
+ byteArr = BitConverter.GetBytes(recno);
+ DatabaseEntry key = new DatabaseEntry();
+ key.Data = byteArr;
+ Assert.IsTrue(queueDB.Exists(key));
+ queueDB.Close();
+ }
+
+ [Test]
+ public void TestAppendWithTxn()
+ {
+ testName = "TestAppendWithTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string queueDBFileName = testHome + "/" + testName + ".db";
+ string queueDBName =
+ Path.GetFileNameWithoutExtension(queueDBFileName);
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+ Transaction txn = env.BeginTransaction();
+
+ QueueDatabaseConfig queueConfig = new QueueDatabaseConfig();
+ queueConfig.Creation = CreatePolicy.ALWAYS;
+ queueConfig.Env = env;
+ queueConfig.Length = 1000;
+
+			/*
+			 * If the environment home is set, the file name passed
+			 * to Open() is interpreted relative to it.
+			 */
+ QueueDatabase queueDB = QueueDatabase.Open(
+ queueDBName, queueConfig, txn);
+ DatabaseEntry data;
+ int i = 1000;
+ try
+ {
+ while (i > 0)
+ {
+ data = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ queueDB.Append(data, txn);
+ i--;
+ }
+ txn.Commit();
+ }
+ catch
+ {
+ txn.Abort();
+ }
+ finally
+ {
+ queueDB.Close();
+ env.Close();
+ }
+
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestConsumeWithTxn()
+ {
+ testName = "TestConsumeWithTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string queueDBFileName = testHome + "/" + testName + ".db";
+ string queueDBName = Path.GetFileName(queueDBFileName);
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+ Transaction txn = env.BeginTransaction();
+
+ QueueDatabaseConfig queueConfig =
+ new QueueDatabaseConfig();
+ queueConfig.Creation = CreatePolicy.ALWAYS;
+ queueConfig.Env = env;
+ queueConfig.Length = 1000;
+ QueueDatabase queueDB = QueueDatabase.Open(
+ queueDBName, queueConfig, txn);
+
+ int i = 1;
+ DatabaseEntry data;
+ DatabaseEntry getData = new DatabaseEntry();
+ while (i <= 10)
+ {
+ data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes(i.ToString()));
+ queueDB.Append(data, txn);
+ if (i == 5)
+ {
+ getData = data;
+ }
+ i++;
+ }
+
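+			/*
+			 * Consume removes the record at the head of the queue, so the
+			 * returned record number should no longer be found when the
+			 * database is reopened below.
+			 */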
+ KeyValuePair<uint, DatabaseEntry> pair = queueDB.Consume(false, txn);
+
+ queueDB.Close();
+ txn.Commit();
+ env.Close();
+
+ Database db = Database.Open(queueDBFileName,
+ new QueueDatabaseConfig());
+ try
+ {
+ DatabaseEntry key =
+ new DatabaseEntry(BitConverter.GetBytes(pair.Key));
+ db.Get(key);
+ }
+ catch (NotFoundException)
+ {
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ db.Close();
+ }
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestConsumeWithoutTxn()
+ {
+ testName = "TestConsumeWithoutTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string queueDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ QueueDatabaseConfig queueConfig =
+ new QueueDatabaseConfig();
+ queueConfig.Creation = CreatePolicy.ALWAYS;
+ queueConfig.ErrorPrefix = testName;
+ queueConfig.Length = 1000;
+
+ QueueDatabase queueDB = QueueDatabase.Open(
+ queueDBFileName, queueConfig);
+ DatabaseEntry data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+ queueDB.Append(data);
+
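+			// Consume removes the head record from the queue, so the
+			// following Get on the returned record number should fail
+			// with NotFoundException.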
+ DatabaseEntry consumeData = new DatabaseEntry();
+ KeyValuePair<uint, DatabaseEntry> pair = queueDB.Consume(false);
+ try
+ {
+ DatabaseEntry key =
+ new DatabaseEntry(BitConverter.GetBytes(pair.Key));
+ queueDB.Get(key);
+ }
+ catch (NotFoundException)
+ {
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ queueDB.Close();
+ }
+ }
+
+ public void TestCursor()
+ {
+ testName = "TestCursor";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+			GetCursor(testHome + "/" + testName + ".db", false);
+ }
+
+ public void TestCursorWithConfig()
+ {
+ testName = "TestCursorWithConfig";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+			GetCursor(testHome + "/" + testName + ".db", true);
+ }
+
+		public void GetCursor(string dbFileName, bool ifConfig)
+ {
+ QueueDatabaseConfig dbConfig = new QueueDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Length = 100;
+ QueueDatabase db = QueueDatabase.Open(dbFileName, dbConfig);
+ Cursor cursor;
+ if (ifConfig == false)
+ cursor = db.Cursor();
+ else
+ cursor = db.Cursor(new CursorConfig());
+ cursor.Close();
+ db.Close();
+ }
+
+ //[Test]
+ //public void TestDupCompare()
+ //{
+ // testName = "TestDupCompare";
+ // testHome = testFixtureHome + "/" + testName;
+ // string dbFileName = testHome + "/" + testName + ".db";
+
+ // Configuration.ClearDir(testHome);
+
+ // QueueDatabaseConfig dbConfig = new QueueDatabaseConfig();
+ // dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ // dbConfig.DuplicateCompare = new EntryComparisonDelegate(dbIntCompare);
+ // dbConfig.Length = 10;
+ // dbConfig.PageSize = 40860;
+ // try
+ // {
+ // QueueDatabase db = QueueDatabase.Open(dbFileName, dbConfig);
+ // int ret = db.DupCompare(new DatabaseEntry(BitConverter.GetBytes(255)),
+ // new DatabaseEntry(BitConverter.GetBytes(257)));
+ // Assert.Greater(0, ret);
+ // db.Close();
+ // }
+ // catch (DatabaseException e)
+ // {
+ // Console.WriteLine(e.Message);
+ // }
+ //}
+
+ private int dbIntCompare(DatabaseEntry dbt1,
+ DatabaseEntry dbt2)
+ {
+ int a, b;
+ a = BitConverter.ToInt16(dbt1.Data, 0);
+ b = BitConverter.ToInt16(dbt2.Data, 0);
+ return a - b;
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestKeyEmptyException()
+ {
+ testName = "TestKeyEmptyException";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseLocking = true;
+ envConfig.UseLogging = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ QueueDatabase db;
+ try
+ {
+ Transaction openTxn = env.BeginTransaction();
+ try
+ {
+ QueueDatabaseConfig queueConfig =
+ new QueueDatabaseConfig();
+ queueConfig.Creation = CreatePolicy.IF_NEEDED;
+ queueConfig.Length = 10;
+ queueConfig.Env = env;
+ db = QueueDatabase.Open(testName + ".db",
+ queueConfig, openTxn);
+ openTxn.Commit();
+ }
+ catch (DatabaseException e)
+ {
+ openTxn.Abort();
+ throw e;
+ }
+
+ Transaction cursorTxn = env.BeginTransaction();
+ Cursor cursor;
+ try
+ {
+					/*
+					 * Add a record to the queue database with a cursor,
+					 * then abort the transaction.
+					 */
+ cursor = db.Cursor(cursorTxn);
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ new DatabaseEntry(BitConverter.GetBytes((int)10)),
+ new DatabaseEntry(ASCIIEncoding.ASCII.GetBytes("data")));
+ cursor.Add(pair);
+ cursor.Close();
+ cursorTxn.Abort();
+ }
+ catch (DatabaseException e)
+ {
+ cursorTxn.Abort();
+ db.Close();
+ throw e;
+ }
+
+ Transaction delTxn = env.BeginTransaction();
+ try
+ {
+					/*
+					 * Because the cursor put was aborted, checking whether
+					 * the record still exists in the queue database throws
+					 * KeyEmptyException.
+					 */
+ db.Exists(new DatabaseEntry(
+ BitConverter.GetBytes((int)10)), delTxn);
+ delTxn.Commit();
+ }
+ catch (DatabaseException e)
+ {
+ delTxn.Abort();
+ throw e;
+ }
+ finally
+ {
+ db.Close();
+ }
+ }
+ catch (KeyEmptyException)
+ {
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ env.Close();
+ }
+ }
+
+ [Test]
+ public void TestOpenExistingQueueDB()
+ {
+ testName = "TestOpenExistingQueueDB";
+ testHome = testFixtureHome + "/" + testName;
+ string queueDBFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ QueueDatabaseConfig queueConfig = new QueueDatabaseConfig();
+ queueConfig.Creation = CreatePolicy.ALWAYS;
+ QueueDatabase queueDB = QueueDatabase.Open(
+ queueDBFileName, queueConfig);
+ queueDB.Close();
+
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ Database db = Database.Open(queueDBFileName, dbConfig);
+ Assert.AreEqual(db.Type, DatabaseType.QUEUE);
+ db.Close();
+ }
+
+ [Test]
+ public void TestOpenNewQueueDB()
+ {
+ testName = "TestOpenNewQueueDB";
+ testHome = testFixtureHome + "/" + testName;
+ string queueDBFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Configure all fields/properties in queue database.
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ QueueDatabaseConfig queueConfig = new QueueDatabaseConfig();
+ QueueDatabaseConfigTest.Config(xmlElem, ref queueConfig, true);
+ queueConfig.Feedback = new DatabaseFeedbackDelegate(DbFeedback);
+
+ // Open the queue database with above configuration.
+ QueueDatabase queueDB = QueueDatabase.Open(
+ queueDBFileName, queueConfig);
+
+ // Check the fields/properties in opened queue database.
+ Confirm(xmlElem, queueDB, true);
+
+ queueDB.Close();
+ }
+
+ private void DbFeedback(DatabaseFeedbackEvent opcode, int percent)
+ {
+ if (opcode == DatabaseFeedbackEvent.UPGRADE)
+ Console.WriteLine("Update for %d%", percent);
+
+ if (opcode == DatabaseFeedbackEvent.VERIFY)
+ Console.WriteLine("Vertify for %d", percent);
+ }
+
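+ // The Put succeeds, but GetBoth is expected to throw NotFoundException:
+ // queue records are padded to the configured length, so the stored data
+ // no longer matches the original 4-byte value.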
+ [Test, ExpectedException(typeof(NotFoundException))]
+ public void TestPutToQueue()
+ {
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+
+ testName = "TestPutQueue";
+ testHome = testFixtureHome + "/" + testName;
+ string queueDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ QueueDatabaseConfig queueConfig =
+ new QueueDatabaseConfig();
+ queueConfig.Length = 512;
+ queueConfig.Creation = CreatePolicy.ALWAYS;
+ using (QueueDatabase queueDB = QueueDatabase.Open(
+ queueDBFileName, queueConfig))
+ {
+ DatabaseEntry key = new DatabaseEntry();
+ key.Data = BitConverter.GetBytes((int)100);
+ DatabaseEntry data = new DatabaseEntry(
+ BitConverter.GetBytes((int)1));
+ queueDB.Put(key, data);
+ pair = queueDB.GetBoth(key, data);
+ }
+ }
+
+ [Test]
+ public void TestStats()
+ {
+ testName = "TestStats";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" +
+ testName + ".db";
+ Configuration.ClearDir(testHome);
+
+ QueueDatabaseConfig dbConfig =
+ new QueueDatabaseConfig();
+ ConfigCase1(dbConfig);
+ QueueDatabase db = QueueDatabase.Open(dbFileName, dbConfig);
+
+ QueueStats stats = db.Stats();
+ ConfirmStatsPart1Case1(stats);
+ db.PrintFastStats(true);
+
+ // Load the database: put records 1-100, then repeatedly overwrite record 100.
+ PutRecordCase1(db, null);
+
+ stats = db.Stats();
+ ConfirmStatsPart2Case1(stats);
+ db.PrintFastStats();
+
+ db.Close();
+ }
+
+ [Test]
+ public void TestStatsInTxn()
+ {
+ testName = "TestStatsInTxn";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ StatsInTxn(testHome, testName, false);
+ }
+
+ [Test]
+ public void TestStatsWithIsolation()
+ {
+ testName = "TestStatsWithIsolation";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ StatsInTxn(testHome, testName, true);
+ }
+
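+ // Gather queue statistics inside a transaction, before and after
+ // loading records, optionally at a specific isolation degree.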
+ public void StatsInTxn(string home, string name, bool ifIsolation)
+ {
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ EnvConfigCase1(envConfig);
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, envConfig);
+
+ Transaction openTxn = env.BeginTransaction();
+ QueueDatabaseConfig dbConfig =
+ new QueueDatabaseConfig();
+ ConfigCase1(dbConfig);
+ dbConfig.Env = env;
+ QueueDatabase db = QueueDatabase.Open(name + ".db",
+ dbConfig, openTxn);
+ openTxn.Commit();
+
+ Transaction statsTxn = env.BeginTransaction();
+ QueueStats stats;
+ if (ifIsolation == false)
+ stats = db.Stats(statsTxn);
+ else
+ stats = db.Stats(statsTxn, Isolation.DEGREE_ONE);
+
+ ConfirmStatsPart1Case1(stats);
+ db.PrintStats(true);
+
+ // Load the database: put records 1-100, then repeatedly overwrite record 100.
+ PutRecordCase1(db, statsTxn);
+
+ if (ifIsolation == false)
+ stats = db.Stats(statsTxn);
+ else
+ stats = db.Stats(statsTxn, Isolation.DEGREE_TWO);
+ ConfirmStatsPart2Case1(stats);
+ db.PrintStats();
+
+ statsTxn.Commit();
+ db.Close();
+ env.Close();
+ }
+
+ public void EnvConfigCase1(DatabaseEnvironmentConfig cfg)
+ {
+ cfg.Create = true;
+ cfg.UseTxns = true;
+ cfg.UseMPool = true;
+ cfg.UseLogging = true;
+ }
+
+ public void ConfigCase1(QueueDatabaseConfig dbConfig)
+ {
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.PageSize = 4096;
+ dbConfig.ExtentSize = 1024;
+ dbConfig.Length = 4000;
+ dbConfig.PadByte = 32;
+ }
+
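+ // Put records 1-100, then repeatedly overwrite record 100 with a
+ // 4000-byte payload, optionally within the given transaction.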
+ public void PutRecordCase1(QueueDatabase db, Transaction txn)
+ {
+ byte[] bigArray = new byte[4000];
+ for (int i = 1; i <= 100; i++)
+ {
+ if (txn == null)
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)));
+ else
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)), txn);
+ }
+ DatabaseEntry key = new DatabaseEntry(BitConverter.GetBytes((int)100));
+ for (int i = 100; i <= 500; i++)
+ {
+ if (txn == null)
+ db.Put(key, new DatabaseEntry(bigArray));
+ else
+ db.Put(key, new DatabaseEntry(bigArray), txn);
+ }
+ }
+
+ public void ConfirmStatsPart1Case1(QueueStats stats)
+ {
+ Assert.AreEqual(1, stats.FirstRecordNumber);
+ Assert.AreNotEqual(0, stats.MagicNumber);
+ Assert.AreEqual(1, stats.NextRecordNumber);
+ Assert.AreEqual(4096, stats.PageSize);
+ Assert.AreEqual(1024, stats.PagesPerExtent);
+ Assert.AreEqual(4000, stats.RecordLength);
+ Assert.AreEqual(32, stats.RecordPadByte);
+ Assert.AreEqual(4, stats.Version);
+ }
+
+ public void ConfirmStatsPart2Case1(QueueStats stats)
+ {
+ Assert.AreNotEqual(0, stats.DataPages);
+ Assert.AreEqual(0, stats.DataPagesBytesFree);
+ Assert.AreEqual(0, stats.MetadataFlags);
+ Assert.AreEqual(100, stats.nData);
+ Assert.AreEqual(100, stats.nKeys);
+ }
+
+ public static void Confirm(XmlElement xmlElem,
+ QueueDatabase queueDB, bool compulsory)
+ {
+ DatabaseTest.Confirm(xmlElem, queueDB, compulsory);
+
+ // Confirm queue database specific field/property
+ Configuration.ConfirmUint(xmlElem, "ExtentSize",
+ queueDB.ExtentSize, compulsory);
+ Configuration.ConfirmBool(xmlElem, "ConsumeInOrder",
+ queueDB.InOrder, compulsory);
+ Configuration.ConfirmInt(xmlElem, "PadByte",
+ queueDB.PadByte, compulsory);
+ Assert.AreEqual(DatabaseType.QUEUE, queueDB.Type);
+ string type = queueDB.Type.ToString();
+ Assert.IsNotNull(type);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/README b/db-4.8.30/test/scr037/README
new file mode 100644
index 0000000..3e02730
--- /dev/null
+++ b/db-4.8.30/test/scr037/README
@@ -0,0 +1 @@
+test/scr037 is reserved for the forthcoming C# API tests
\ No newline at end of file
diff --git a/db-4.8.30/test/scr037/RecnoCursorTest.cs b/db-4.8.30/test/scr037/RecnoCursorTest.cs
new file mode 100644
index 0000000..790372b
--- /dev/null
+++ b/db-4.8.30/test/scr037/RecnoCursorTest.cs
@@ -0,0 +1,257 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class RecnoCursorTest
+ {
+ private string testFixtureName;
+ private string testFixtureHome;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "RecnoCursorTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+ }
+
+ [Test]
+ public void TestCursor()
+ {
+ testName = "TestCursor";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ GetCursorWithImplicitTxn(testHome,
+ testName + ".db", false);
+ }
+
+ [Test]
+ public void TestCursorWithConfig()
+ {
+ testName = "TestCursorWithConfig";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ GetCursorWithImplicitTxn(testHome,
+ testName + ".db", true);
+ }
+
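+ // Open a recno database in a CDB environment and get a cursor,
+ // with or without an explicit CursorConfig.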
+ public void GetCursorWithImplicitTxn(string home,
+ string dbFile, bool ifConfig)
+ {
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseCDB = true;
+ envConfig.UseMPool = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, envConfig);
+
+ RecnoDatabaseConfig dbConfig =
+ new RecnoDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ RecnoDatabase db = RecnoDatabase.Open(dbFile,
+ dbConfig);
+
+ RecnoCursor cursor;
+ if (ifConfig == false)
+ cursor = db.Cursor();
+ else
+ cursor = db.Cursor(new CursorConfig());
+
+ cursor.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestCursorInTxn()
+ {
+ testName = "TestCursorInTxn";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ GetCursorWithExplicitTxn(testHome,
+ testName + ".db", false);
+ }
+
+ [Test]
+ public void TestConfigedCursorInTxn()
+ {
+ testName = "TestConfigedCursorInTxn";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ GetCursorWithExplicitTxn(testHome,
+ testName + ".db", true);
+ }
+
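+ // Open a recno database in a transactional environment and get a
+ // cursor within an explicit transaction.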
+ public void GetCursorWithExplicitTxn(string home,
+ string dbFile, bool ifConfig)
+ {
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, envConfig);
+
+ Transaction openTxn = env.BeginTransaction();
+ RecnoDatabaseConfig dbConfig =
+ new RecnoDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ RecnoDatabase db = RecnoDatabase.Open(dbFile,
+ dbConfig, openTxn);
+ openTxn.Commit();
+
+ Transaction cursorTxn = env.BeginTransaction();
+ RecnoCursor cursor;
+ if (ifConfig == false)
+ cursor = db.Cursor(cursorTxn);
+ else
+ cursor = db.Cursor(new CursorConfig(), cursorTxn);
+ cursor.Close();
+ cursorTxn.Commit();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestDuplicate()
+ {
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ RecnoDatabase db;
+ RecnoDatabaseConfig dbConfig;
+ RecnoCursor cursor, dupCursor;
+ string dbFileName;
+
+ testName = "TestDuplicate";
+ testHome = testFixtureHome + "/" + testName;
+ dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ dbConfig = new RecnoDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ db = RecnoDatabase.Open(dbFileName, dbConfig);
+ cursor = db.Cursor();
+
+ /*
+ * Add a record(1, 1) by cursor and move
+ * the cursor to the current record.
+ */
+ AddOneByCursor(cursor);
+ cursor.Refresh();
+
+ // Duplicate the cursor, positioning the new cursor at the same record.
+ dupCursor = cursor.Duplicate(true);
+
+ // Overwrite the record.
+ dupCursor.Overwrite(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("newdata")));
+
+ // Confirm that the original data doesn't exist.
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ new DatabaseEntry(
+ BitConverter.GetBytes((int)1)),
+ new DatabaseEntry(
+ BitConverter.GetBytes((int)1)));
+ Assert.IsFalse(dupCursor.Move(pair, true));
+
+ dupCursor.Close();
+ cursor.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestInsertToLoc()
+ {
+ RecnoDatabase db;
+ RecnoDatabaseConfig dbConfig;
+ RecnoCursor cursor;
+ DatabaseEntry data;
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ string dbFileName;
+
+ testName = "TestInsertToLoc";
+ testHome = testFixtureHome + "/" + testName;
+ dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open database and cursor.
+ dbConfig = new RecnoDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Renumber = true;
+ db = RecnoDatabase.Open(dbFileName, dbConfig);
+ cursor = db.Cursor();
+
+ /*
+ * Add a record(1, 1) by cursor and move
+ * the cursor to the current record.
+ */
+ AddOneByCursor(cursor);
+ cursor.Refresh();
+
+ /*
+ * Insert the new record(1,10) after the
+ * record(1,1).
+ */
+ data = new DatabaseEntry(
+ BitConverter.GetBytes((int)10));
+ cursor.Insert(data, Cursor.InsertLocation.AFTER);
+
+ /*
+ * Move the cursor to the record(1,1) and
+ * confirm that the next record is the one just inserted.
+ */
+ pair = new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ new DatabaseEntry(
+ BitConverter.GetBytes((int)1)),
+ new DatabaseEntry(
+ BitConverter.GetBytes((int)1)));
+ Assert.IsTrue(cursor.Move(pair, true));
+ Assert.IsTrue(cursor.MoveNext());
+ Assert.AreEqual(BitConverter.GetBytes((int)10),
+ cursor.Current.Value.Data);
+
+ cursor.Close();
+ db.Close();
+ }
+
+ public void AddOneByCursor(RecnoCursor cursor)
+ {
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair =
+ new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ new DatabaseEntry(
+ BitConverter.GetBytes((int)1)),
+ new DatabaseEntry(
+ BitConverter.GetBytes((int)1)));
+ cursor.Add(pair);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/RecnoDatabaseConfigTest.cs b/db-4.8.30/test/scr037/RecnoDatabaseConfigTest.cs
new file mode 100644
index 0000000..87e29a5
--- /dev/null
+++ b/db-4.8.30/test/scr037/RecnoDatabaseConfigTest.cs
@@ -0,0 +1,131 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class RecnoDatabaseConfigTest : DatabaseConfigTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "RecnoDatabaseConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ override public void TestConfigWithoutEnv()
+ {
+ testName = "TestConfigWithoutEnv";
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+
+ RecnoDatabaseConfig recnoDBConfig =
+ new RecnoDatabaseConfig();
+ Config(xmlElem, ref recnoDBConfig, true);
+ Confirm(xmlElem, recnoDBConfig, true);
+ }
+
+ [Test]
+ public void TestAppend()
+ {
+ uint recno;
+
+ testName = "TestAppend";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ RecnoDatabaseConfig recnoConfig =
+ new RecnoDatabaseConfig();
+ recnoConfig.Creation = CreatePolicy.IF_NEEDED;
+ recnoConfig.Append = new AppendRecordDelegate(
+ AppendRecord);
+ RecnoDatabase recnoDB = RecnoDatabase.Open(
+ dbFileName, recnoConfig);
+ recno = recnoDB.Append(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data")));
+
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ pair = recnoDB.Get(
+ new DatabaseEntry(BitConverter.GetBytes(recno)));
+ Assert.AreEqual(ASCIIEncoding.ASCII.GetBytes("data"),
+ pair.Value.Data);
+
+ recnoDB.Close();
+ }
+
+ public void AppendRecord(DatabaseEntry data, uint recno)
+ {
+ data.Data = BitConverter.GetBytes(recno);
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ RecnoDatabaseConfig recnoDBConfig, bool compulsory)
+ {
+ DatabaseConfig dbConfig = recnoDBConfig;
+ Confirm(xmlElement, dbConfig, compulsory);
+
+ // Confirm Recno database specific configuration
+ Configuration.ConfirmString(xmlElement, "BackingFile",
+ recnoDBConfig.BackingFile, compulsory);
+ Configuration.ConfirmCreatePolicy(xmlElement,
+ "Creation", recnoDBConfig.Creation, compulsory);
+ Configuration.ConfirmInt(xmlElement, "Delimiter",
+ recnoDBConfig.Delimiter, compulsory);
+ Configuration.ConfirmUint(xmlElement, "Length",
+ recnoDBConfig.Length, compulsory);
+ Configuration.ConfirmInt(xmlElement, "PadByte",
+ recnoDBConfig.PadByte, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Renumber",
+ recnoDBConfig.Renumber, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Snapshot",
+ recnoDBConfig.Snapshot, compulsory);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref RecnoDatabaseConfig recnoDBConfig, bool compulsory)
+ {
+ int intValue = new int();
+ uint uintValue = new uint();
+ DatabaseConfig dbConfig = recnoDBConfig;
+ Config(xmlElement, ref dbConfig, compulsory);
+
+ // Configure specific fields/properties of Recno database
+ Configuration.ConfigCreatePolicy(xmlElement, "Creation",
+ ref recnoDBConfig.Creation, compulsory);
+ if (Configuration.ConfigInt(xmlElement, "Delimiter",
+ ref intValue, compulsory))
+ recnoDBConfig.Delimiter = intValue;
+ if (Configuration.ConfigUint(xmlElement, "Length",
+ ref uintValue, compulsory))
+ recnoDBConfig.Length = uintValue;
+ if (Configuration.ConfigInt(xmlElement, "PadByte",
+ ref intValue, compulsory))
+ recnoDBConfig.PadByte = intValue;
+ Configuration.ConfigBool(xmlElement, "Renumber",
+ ref recnoDBConfig.Renumber, compulsory);
+ Configuration.ConfigBool(xmlElement, "Snapshot",
+ ref recnoDBConfig.Snapshot, compulsory);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/RecnoDatabaseTest.cs b/db-4.8.30/test/scr037/RecnoDatabaseTest.cs
new file mode 100644
index 0000000..12238d7
--- /dev/null
+++ b/db-4.8.30/test/scr037/RecnoDatabaseTest.cs
@@ -0,0 +1,487 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class RecnoDatabaseTest : DatabaseTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "RecnoDatabaseTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestOpenExistingRecnoDB()
+ {
+ testName = "TestOpenExistingRecnoDB";
+ testHome = testFixtureHome + "/" + testName;
+ string recnoDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ RecnoDatabaseConfig recConfig =
+ new RecnoDatabaseConfig();
+ recConfig.Creation = CreatePolicy.ALWAYS;
+ RecnoDatabase recDB = RecnoDatabase.Open(
+ recnoDBFileName, recConfig);
+ recDB.Close();
+
+ RecnoDatabaseConfig dbConfig = new RecnoDatabaseConfig();
+ string backingFile = testHome + "/backingFile";
+ File.Copy(recnoDBFileName, backingFile);
+ dbConfig.BackingFile = backingFile;
+ RecnoDatabase db = RecnoDatabase.Open(recnoDBFileName, dbConfig);
+ Assert.AreEqual(db.Type, DatabaseType.RECNO);
+ db.Close();
+ }
+
+ [Test]
+ public void TestOpenNewRecnoDB()
+ {
+ RecnoDatabase recnoDB;
+ RecnoDatabaseConfig recnoConfig;
+
+ testName = "TestOpenNewRecnoDB";
+ testHome = testFixtureHome + "/" + testName;
+ string recnoDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ recnoConfig = new RecnoDatabaseConfig();
+ RecnoDatabaseConfigTest.Config(xmlElem,
+ ref recnoConfig, true);
+ recnoDB = RecnoDatabase.Open(recnoDBFileName,
+ recnoConfig);
+ Confirm(xmlElem, recnoDB, true);
+ recnoDB.Close();
+ }
+
+ [Test]
+ public void TestAppendWithoutTxn()
+ {
+ testName = "TestAppendWithoutTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string recnoDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ RecnoDatabaseConfig recnoConfig =
+ new RecnoDatabaseConfig();
+ recnoConfig.Creation = CreatePolicy.ALWAYS;
+ RecnoDatabase recnoDB = RecnoDatabase.Open(
+ recnoDBFileName, recnoConfig);
+
+ DatabaseEntry data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+ uint num = recnoDB.Append(data);
+ DatabaseEntry key = new DatabaseEntry(
+ BitConverter.GetBytes(num));
+ Assert.IsTrue(recnoDB.Exists(key));
+ KeyValuePair<DatabaseEntry, DatabaseEntry> record =
+ recnoDB.Get(key);
+ Assert.IsTrue(data.Data.Length ==
+ record.Value.Data.Length);
+ for (int i = 0; i < data.Data.Length; i++)
+ Assert.IsTrue(data.Data[i] ==
+ record.Value.Data[i]);
+ recnoDB.Close();
+ }
+
+ [Test]
+ public void TestCompact()
+ {
+ testName = "TestCompact";
+ testHome = testFixtureHome + "/" + testName;
+ string recnoDBFileName = testHome + "/" +
+ testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ RecnoDatabaseConfig recnoConfig =
+ new RecnoDatabaseConfig();
+ recnoConfig.Creation = CreatePolicy.ALWAYS;
+ recnoConfig.Length = 512;
+
+ DatabaseEntry key, data;
+ RecnoDatabase recnoDB;
+ using (recnoDB = RecnoDatabase.Open(
+ recnoDBFileName, recnoConfig))
+ {
+ for (int i = 1; i <= 5000; i++)
+ {
+ data = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ recnoDB.Append(data);
+ }
+
+ for (int i = 1; i <= 5000; i++)
+ {
+ if (i > 500 && (i % 5 != 0))
+ {
+ key = new DatabaseEntry(
+ BitConverter.GetBytes(i));
+ recnoDB.Delete(key);
+ }
+ }
+
+ int startInt = 1;
+ int stopInt = 2500;
+ DatabaseEntry start, stop;
+
+ start = new DatabaseEntry(
+ BitConverter.GetBytes(startInt));
+ stop = new DatabaseEntry(
+ BitConverter.GetBytes(stopInt));
+ Assert.IsTrue(recnoDB.Exists(start));
+ Assert.IsTrue(recnoDB.Exists(stop));
+
+ CompactConfig cCfg = new CompactConfig();
+ cCfg.start = start;
+ cCfg.stop = stop;
+ cCfg.FillPercentage = 30;
+ cCfg.Pages = 1;
+ cCfg.returnEnd = true;
+ cCfg.Timeout = 5000;
+ cCfg.TruncatePages = true;
+ CompactData compactData = recnoDB.Compact(cCfg);
+
+ Assert.IsNotNull(compactData.End);
+ Assert.AreNotEqual(0, compactData.PagesExamined);
+ }
+ }
+
+ [Test]
+ public void TestStats()
+ {
+ testName = "TestStats";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" +
+ testName + ".db";
+ Configuration.ClearDir(testHome);
+
+ RecnoDatabaseConfig dbConfig =
+ new RecnoDatabaseConfig();
+ ConfigCase1(dbConfig);
+ RecnoDatabase db = RecnoDatabase.Open(dbFileName,
+ dbConfig);
+ RecnoStats stats = db.Stats();
+ ConfirmStatsPart1Case1(stats);
+
+ // Put 1000 records into the database.
+ PutRecordCase1(db, null);
+ stats = db.Stats();
+ ConfirmStatsPart2Case1(stats);
+
+ // Delete records 250 through 750.
+ for (int i = 250; i <= 750; i++)
+ db.Delete(new DatabaseEntry(BitConverter.GetBytes(i)));
+ stats = db.Stats();
+ ConfirmStatsPart3Case1(stats);
+
+ db.Close();
+ }
+
+ [Test]
+ public void TestStatsInTxn()
+ {
+ testName = "TestStatsInTxn";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ StatsInTxn(testHome, testName, false);
+ }
+
+ [Test]
+ public void TestStatsWithIsolation()
+ {
+ testName = "TestStatsWithIsolation";
+ testHome = testFixtureHome + "/" + testName;
+ Configuration.ClearDir(testHome);
+
+ StatsInTxn(testHome, testName, true);
+ }
+
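+ // Gather recno statistics inside a transaction at three stages:
+ // empty database, after 1000 puts, and after deleting records 250-750.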
+ public void StatsInTxn(string home, string name, bool ifIsolation)
+ {
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ EnvConfigCase1(envConfig);
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, envConfig);
+
+ Transaction openTxn = env.BeginTransaction();
+ RecnoDatabaseConfig dbConfig =
+ new RecnoDatabaseConfig();
+ ConfigCase1(dbConfig);
+ dbConfig.Env = env;
+ RecnoDatabase db = RecnoDatabase.Open(name + ".db",
+ dbConfig, openTxn);
+ openTxn.Commit();
+
+ Transaction statsTxn = env.BeginTransaction();
+ RecnoStats stats;
+ RecnoStats fastStats;
+ if (ifIsolation == false)
+ {
+ stats = db.Stats(statsTxn);
+ fastStats = db.FastStats(statsTxn);
+ }
+ else
+ {
+ stats = db.Stats(statsTxn, Isolation.DEGREE_ONE);
+ fastStats = db.FastStats(statsTxn,
+ Isolation.DEGREE_ONE);
+ }
+ ConfirmStatsPart1Case1(stats);
+
+ // Put 1000 records into the database.
+ PutRecordCase1(db, statsTxn);
+
+ if (ifIsolation == false)
+ {
+ stats = db.Stats(statsTxn);
+ fastStats = db.FastStats(statsTxn);
+ }
+ else
+ {
+ stats = db.Stats(statsTxn, Isolation.DEGREE_TWO);
+ fastStats = db.FastStats(statsTxn,
+ Isolation.DEGREE_TWO);
+ }
+ ConfirmStatsPart2Case1(stats);
+
+ // Delete records 250 through 750.
+ for (int i = 250; i <= 750; i++)
+ db.Delete(new DatabaseEntry(BitConverter.GetBytes(i)),
+ statsTxn);
+
+ if (ifIsolation == false)
+ {
+ stats = db.Stats(statsTxn);
+ fastStats = db.FastStats(statsTxn);
+ }
+ else
+ {
+ stats = db.Stats(statsTxn, Isolation.DEGREE_THREE);
+ fastStats = db.FastStats(statsTxn,
+ Isolation.DEGREE_THREE);
+ }
+ ConfirmStatsPart3Case1(stats);
+
+ statsTxn.Commit();
+ db.Close();
+ env.Close();
+ }
+
+ public void EnvConfigCase1(DatabaseEnvironmentConfig cfg)
+ {
+ cfg.Create = true;
+ cfg.UseTxns = true;
+ cfg.UseMPool = true;
+ cfg.UseLogging = true;
+ }
+
+ public void ConfigCase1(RecnoDatabaseConfig dbConfig)
+ {
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.PageSize = 4096;
+ dbConfig.Length = 4000;
+ dbConfig.PadByte = 256;
+ }
+
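+ // Put records 1-1000 into the database, optionally within the
+ // given transaction.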
+ public void PutRecordCase1(RecnoDatabase db, Transaction txn)
+ {
+ for (int i = 1; i <= 1000; i++)
+ {
+ if (txn == null)
+ db.Put(new DatabaseEntry(
+ BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)));
+ else
+ db.Put(new DatabaseEntry(
+ BitConverter.GetBytes(i)),
+ new DatabaseEntry(
+ BitConverter.GetBytes(i)), txn);
+ }
+ }
+
+ public void ConfirmStatsPart1Case1(RecnoStats stats)
+ {
+ Assert.AreEqual(1, stats.EmptyPages);
+ Assert.AreEqual(1, stats.Levels);
+ Assert.AreNotEqual(0, stats.MagicNumber);
+ Assert.AreEqual(10, stats.MetadataFlags);
+ Assert.AreEqual(2, stats.MinKey);
+ Assert.AreEqual(2, stats.nPages);
+ Assert.AreEqual(4096, stats.PageSize);
+ Assert.AreEqual(4000, stats.RecordLength);
+ Assert.AreEqual(256, stats.RecordPadByte);
+ Assert.AreEqual(9, stats.Version);
+ }
+
+ public void ConfirmStatsPart2Case1(RecnoStats stats)
+ {
+ Assert.AreEqual(0, stats.DuplicatePages);
+ Assert.AreEqual(0, stats.DuplicatePagesFreeBytes);
+ Assert.AreNotEqual(0, stats.InternalPages);
+ Assert.AreNotEqual(0, stats.InternalPagesFreeBytes);
+ Assert.AreNotEqual(0, stats.LeafPages);
+ Assert.AreNotEqual(0, stats.LeafPagesFreeBytes);
+ Assert.AreEqual(1000, stats.nData);
+ Assert.AreEqual(1000, stats.nKeys);
+ Assert.AreNotEqual(0, stats.OverflowPages);
+ Assert.AreNotEqual(0, stats.OverflowPagesFreeBytes);
+ }
+
+ public void ConfirmStatsPart3Case1(RecnoStats stats)
+ {
+ Assert.AreNotEqual(0, stats.FreePages);
+ }
+
+ [Test]
+ public void TestTruncateUnusedPages()
+ {
+ testName = "TestTruncateUnusedPages";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseCDB = true;
+ envConfig.UseMPool = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ RecnoDatabaseConfig dbConfig =
+ new RecnoDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ dbConfig.PageSize = 512;
+ RecnoDatabase db = RecnoDatabase.Open(
+ testName + ".db", dbConfig);
+
+ ModifyRecordsInDB(db, null);
+ Assert.Less(0, db.TruncateUnusedPages());
+
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestTruncateUnusedPagesInTxn()
+ {
+ testName = "TestTruncateUnusedPagesInTxn";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseLogging = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ testHome, envConfig);
+
+ Transaction openTxn = env.BeginTransaction();
+ RecnoDatabaseConfig dbConfig =
+ new RecnoDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ dbConfig.PageSize = 512;
+ RecnoDatabase db = RecnoDatabase.Open(
+ testName + ".db", dbConfig, openTxn);
+ openTxn.Commit();
+
+ Transaction modifyTxn = env.BeginTransaction();
+ ModifyRecordsInDB(db, modifyTxn);
+ Assert.Less(0, db.TruncateUnusedPages(modifyTxn));
+ modifyTxn.Commit();
+
+ db.Close();
+ env.Close();
+ }
+
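+ // Append 100 large records and delete most of them so that
+ // TruncateUnusedPages has free pages to reclaim.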
+ public void ModifyRecordsInDB(RecnoDatabase db,
+ Transaction txn)
+ {
+ uint[] recnos = new uint[100];
+
+ if (txn == null)
+ {
+ // Add a lot of records into database.
+ for (int i = 0; i < 100; i++)
+ recnos[i] = db.Append(new DatabaseEntry(
+ new byte[10240]));
+
+ // Remove some records from database.
+ for (int i = 30; i < 100; i++)
+ db.Delete(new DatabaseEntry(
+ BitConverter.GetBytes(recnos[i])));
+ }
+ else
+ {
+ // Add a lot of records into database in txn.
+ for (int i = 0; i < 100; i++)
+ recnos[i] = db.Append(new DatabaseEntry(
+ new byte[10240]), txn);
+
+ // Remove some records from database in txn.
+ for (int i = 30; i < 100; i++)
+ db.Delete(new DatabaseEntry(
+ BitConverter.GetBytes(recnos[i])), txn);
+ }
+ }
+
+ public static void Confirm(XmlElement xmlElem,
+ RecnoDatabase recnoDB, bool compulsory)
+ {
+ DatabaseTest.Confirm(xmlElem, recnoDB, compulsory);
+
+ // Confirm recno database specific field/property
+ Configuration.ConfirmInt(xmlElem, "Delimiter",
+ recnoDB.RecordDelimiter, compulsory);
+ Configuration.ConfirmUint(xmlElem, "Length",
+ recnoDB.RecordLength, compulsory);
+ Configuration.ConfirmInt(xmlElem, "PadByte",
+ recnoDB.RecordPad, compulsory);
+ Configuration.ConfirmBool(xmlElem, "Renumber",
+ recnoDB.Renumber, compulsory);
+ Configuration.ConfirmBool(xmlElem, "Snapshot",
+ recnoDB.Snapshot, compulsory);
+ Assert.AreEqual(DatabaseType.RECNO, recnoDB.Type);
+ string type = recnoDB.Type.ToString();
+ Assert.IsNotNull(type);
+ }
+ }
+}
+
diff --git a/db-4.8.30/test/scr037/ReplicationConfigTest.cs b/db-4.8.30/test/scr037/ReplicationConfigTest.cs
new file mode 100644
index 0000000..3310c50
--- /dev/null
+++ b/db-4.8.30/test/scr037/ReplicationConfigTest.cs
@@ -0,0 +1,215 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class ReplicationConfigTest
+ {
+ private string testFixtureName;
+ private string testName;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "ReplicationConfigTest";
+ }
+
+ [Test]
+ public void TestConfig()
+ {
+ testName = "TestConfig";
+
+ ReplicationConfig repConfig = new ReplicationConfig();
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ Config(xmlElem, ref repConfig, true);
+ Confirm(xmlElem, repConfig, true);
+
+ repConfig.Clockskew(102, 100);
+ Assert.AreEqual(102, repConfig.ClockskewFast);
+ Assert.AreEqual(100, repConfig.ClockskewSlow);
+
+ repConfig.TransmitLimit(1, 1024);
+ Assert.AreEqual(1, repConfig.TransmitLimitGBytes);
+ Assert.AreEqual(1024, repConfig.TransmitLimitBytes);
+
+ repConfig.RetransmissionRequest(10, 100);
+ Assert.AreEqual(100, repConfig.RetransmissionRequestMax);
+ Assert.AreEqual(10, repConfig.RetransmissionRequestMin);
+ }
+
+ [Test]
+ public void TestRepMgrLocalSite()
+ {
+ string host = "127.0.0.0";
+ uint port = 8888;
+ testName = "TestRepMgrLocalSite";
+
+ ReplicationConfig repConfig1 = new ReplicationConfig();
+ repConfig1.RepMgrLocalSite = new ReplicationHostAddress();
+ repConfig1.RepMgrLocalSite.Host = host;
+ repConfig1.RepMgrLocalSite.Port = port;
+ Assert.AreEqual(host, repConfig1.RepMgrLocalSite.Host);
+ Assert.AreEqual(port, repConfig1.RepMgrLocalSite.Port);
+
+ ReplicationConfig repConfig2 = new ReplicationConfig();
+ repConfig2.RepMgrLocalSite =
+ new ReplicationHostAddress(host, port);
+ Assert.AreEqual(host, repConfig2.RepMgrLocalSite.Host);
+ Assert.AreEqual(port, repConfig2.RepMgrLocalSite.Port);
+
+ ReplicationConfig repConfig3 = new ReplicationConfig();
+ repConfig3.RepMgrLocalSite =
+ new ReplicationHostAddress(host + ":" + port);
+ Assert.AreEqual(host, repConfig3.RepMgrLocalSite.Host);
+ Assert.AreEqual(port, repConfig3.RepMgrLocalSite.Port);
+ }
+
+ [Test]
+ public void TestRepMgrAckPolicy()
+ {
+ testName = "TestRepMgrAckPolicy";
+
+ ReplicationConfig repConfig = new ReplicationConfig();
+ repConfig.RepMgrAckPolicy = AckPolicy.ALL;
+ Assert.AreEqual(AckPolicy.ALL,
+ repConfig.RepMgrAckPolicy);
+
+ repConfig.RepMgrAckPolicy = AckPolicy.ALL_PEERS;
+ Assert.AreEqual(AckPolicy.ALL_PEERS,
+ repConfig.RepMgrAckPolicy);
+
+ repConfig.RepMgrAckPolicy = AckPolicy.NONE;
+ Assert.AreEqual(AckPolicy.NONE,
+ repConfig.RepMgrAckPolicy);
+
+ repConfig.RepMgrAckPolicy = AckPolicy.ONE;
+ Assert.AreEqual(AckPolicy.ONE,
+ repConfig.RepMgrAckPolicy);
+
+ repConfig.RepMgrAckPolicy = AckPolicy.ONE_PEER;
+ Assert.AreEqual(AckPolicy.ONE_PEER,
+ repConfig.RepMgrAckPolicy);
+
+ repConfig.RepMgrAckPolicy = AckPolicy.QUORUM;
+ Assert.AreEqual(AckPolicy.QUORUM,
+ repConfig.RepMgrAckPolicy);
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ ReplicationConfig cfg, bool compulsory)
+ {
+ Configuration.ConfirmUint(xmlElement,
+ "AckTimeout", cfg.AckTimeout,
+ compulsory);
+ Configuration.ConfirmBool(xmlElement, "BulkTransfer",
+ cfg.BulkTransfer, compulsory);
+ Configuration.ConfirmUint(xmlElement, "CheckpointDelay",
+ cfg.CheckpointDelay, compulsory);
+ Configuration.ConfirmUint(xmlElement, "ConnectionRetry",
+ cfg.ConnectionRetry, compulsory);
+ Configuration.ConfirmBool(xmlElement, "DelayClientSync",
+ cfg.DelayClientSync, compulsory);
+ Configuration.ConfirmUint(xmlElement, "ElectionRetry",
+ cfg.ElectionRetry, compulsory);
+ Configuration.ConfirmUint(xmlElement, "ElectionTimeout",
+ cfg.ElectionTimeout, compulsory);
+ Configuration.ConfirmUint(xmlElement, "FullElectionTimeout",
+ cfg.FullElectionTimeout, compulsory);
+ Configuration.ConfirmUint(xmlElement, "HeartbeatMonitor",
+ cfg.HeartbeatMonitor, compulsory);
+ Configuration.ConfirmUint(xmlElement, "HeartbeatSend",
+ cfg.HeartbeatSend, compulsory);
+ Configuration.ConfirmUint(xmlElement, "LeaseTimeout",
+ cfg.LeaseTimeout, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoAutoInit",
+ cfg.NoAutoInit, compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoBlocking",
+ cfg.NoBlocking, compulsory);
+ Configuration.ConfirmUint(xmlElement, "NSites",
+ cfg.NSites, compulsory);
+ Configuration.ConfirmUint(xmlElement, "Priority",
+ cfg.Priority, compulsory);
+ Configuration.ConfirmAckPolicy(xmlElement,
+ "RepMgrAckPolicy", cfg.RepMgrAckPolicy, compulsory);
+ Configuration.ConfirmReplicationHostAddress(xmlElement,
+ "RepMgrLocalSite", cfg.RepMgrLocalSite, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Strict2Site",
+ cfg.Strict2Site, compulsory);
+ Configuration.ConfirmBool(xmlElement, "UseMasterLeases",
+ cfg.UseMasterLeases, compulsory);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref ReplicationConfig cfg, bool compulsory)
+ {
+ uint uintValue = new uint();
+
+ if (Configuration.ConfigUint(xmlElement, "AckTimeout",
+ ref uintValue, compulsory))
+ cfg.AckTimeout = uintValue;
+ Configuration.ConfigBool(xmlElement, "BulkTransfer",
+ ref cfg.BulkTransfer, compulsory);
+ if (Configuration.ConfigUint(xmlElement, "CheckpointDelay",
+ ref uintValue, compulsory))
+ cfg.CheckpointDelay = uintValue;
+ if (Configuration.ConfigUint(xmlElement, "ConnectionRetry",
+ ref uintValue, compulsory))
+ cfg.ConnectionRetry = uintValue;
+ Configuration.ConfigBool(xmlElement, "DelayClientSync",
+ ref cfg.DelayClientSync, compulsory);
+ if (Configuration.ConfigUint(xmlElement, "ElectionRetry",
+ ref uintValue, compulsory))
+ cfg.ElectionRetry = uintValue;
+ if (Configuration.ConfigUint(xmlElement, "ElectionTimeout",
+ ref uintValue, compulsory))
+ cfg.ElectionTimeout = uintValue;
+ if (Configuration.ConfigUint(xmlElement, "FullElectionTimeout",
+ ref uintValue, compulsory))
+ cfg.FullElectionTimeout = uintValue;
+ if (Configuration.ConfigUint(xmlElement, "HeartbeatMonitor",
+ ref uintValue, compulsory))
+ cfg.HeartbeatMonitor = uintValue;
+ if (Configuration.ConfigUint(xmlElement, "HeartbeatSend",
+ ref uintValue, compulsory))
+ cfg.HeartbeatSend = uintValue;
+ if (Configuration.ConfigUint(xmlElement, "LeaseTimeout",
+ ref uintValue, compulsory))
+ cfg.LeaseTimeout = uintValue;
+ Configuration.ConfigBool(xmlElement, "NoAutoInit",
+ ref cfg.NoAutoInit, compulsory);
+ Configuration.ConfigBool(xmlElement, "NoBlocking",
+ ref cfg.NoBlocking, compulsory);
+ if (Configuration.ConfigUint(xmlElement, "NSites",
+ ref uintValue, compulsory))
+ cfg.NSites = uintValue;
+ if (Configuration.ConfigUint(xmlElement, "Priority",
+ ref uintValue, compulsory))
+ cfg.Priority = uintValue;
+ Configuration.ConfigAckPolicy(xmlElement,
+ "RepMgrAckPolicy", ref cfg.RepMgrAckPolicy,
+ compulsory);
+ cfg.RepMgrLocalSite = new ReplicationHostAddress();
+ Configuration.ConfigReplicationHostAddress(xmlElement,
+ "RepMgrLocalSite", ref cfg.RepMgrLocalSite, compulsory);
+ Configuration.ConfigBool(xmlElement, "Strict2Site",
+ ref cfg.Strict2Site, compulsory);
+ Configuration.ConfigBool(xmlElement, "UseMasterLeases",
+ ref cfg.UseMasterLeases, compulsory);
+ }
+
+ }
+}
\ No newline at end of file
diff --git a/db-4.8.30/test/scr037/ReplicationTest.cs b/db-4.8.30/test/scr037/ReplicationTest.cs
new file mode 100644
index 0000000..938689b
--- /dev/null
+++ b/db-4.8.30/test/scr037/ReplicationTest.cs
@@ -0,0 +1,748 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class ReplicationTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ private EventWaitHandle clientStartSignal;
+ private EventWaitHandle masterCloseSignal;
+
+ private EventWaitHandle client1StartSignal;
+ private EventWaitHandle client1ReadySignal;
+ private EventWaitHandle client2StartSignal;
+ private EventWaitHandle client2ReadySignal;
+ private EventWaitHandle client3StartSignal;
+ private EventWaitHandle client3ReadySignal;
+ private EventWaitHandle masterLeaveSignal;
+
+ [TestFixtureSetUp]
+ public void SetUp()
+ {
+ testFixtureName = "ReplicationTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+ try
+ {
+ Configuration.ClearDir(testFixtureHome);
+ }
+ catch (Exception)
+ {
+ throw new TestException("Please clean the directory");
+ }
+ }
+
+ [Test]
+ public void TestRepMgr()
+ {
+ testName = "TestRepMgr";
+ testHome = testFixtureHome + "/" + testName;
+
+ clientStartSignal = new AutoResetEvent(false);
+ masterCloseSignal = new AutoResetEvent(false);
+
+ Thread thread1 = new Thread(new ThreadStart(Master));
+ Thread thread2 = new Thread(new ThreadStart(Client));
+
+ // Start master thread before client thread.
+ thread1.Start();
+ Thread.Sleep(1000);
+ thread2.Start();
+ thread2.Join();
+ thread1.Join();
+
+ clientStartSignal.Close();
+ masterCloseSignal.Close();
+ }
+
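+ // Master thread: open a replicated environment on port 8870, start as
+ // master, write data and verify the replication statistics.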
+ public void Master()
+ {
+ string home = testHome + "/Master";
+ string dbName = "rep.db";
+ Configuration.ClearDir(home);
+
+ /*
+ * Configure and open environment with replication
+ * application.
+ */
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.UseReplication = true;
+ cfg.MPoolSystemCfg = new MPoolConfig();
+ cfg.MPoolSystemCfg.CacheSize =
+ new CacheInfo(0, 20485760, 1);
+ cfg.UseLocking = true;
+ cfg.UseTxns = true;
+ cfg.UseMPool = true;
+ cfg.Create = true;
+ cfg.UseLogging = true;
+ cfg.RunRecovery = true;
+ cfg.TxnNoSync = true;
+ cfg.FreeThreaded = true;
+ cfg.RepSystemCfg = new ReplicationConfig();
+ cfg.RepSystemCfg.RepMgrLocalSite =
+ new ReplicationHostAddress("127.0.0.1", 8870);
+ cfg.RepSystemCfg.Priority = 100;
+ cfg.RepSystemCfg.NSites = 2;
+ cfg.RepSystemCfg.BulkTransfer = true;
+ cfg.RepSystemCfg.AckTimeout = 2000;
+ cfg.RepSystemCfg.BulkTransfer = true;
+ cfg.RepSystemCfg.CheckpointDelay = 1500;
+ cfg.RepSystemCfg.Clockskew(102, 100);
+ cfg.RepSystemCfg.ConnectionRetry = 10;
+ cfg.RepSystemCfg.DelayClientSync = false;
+ cfg.RepSystemCfg.ElectionRetry = 5;
+ cfg.RepSystemCfg.ElectionTimeout = 3000;
+ cfg.RepSystemCfg.FullElectionTimeout = 5000;
+ cfg.RepSystemCfg.HeartbeatMonitor = 100;
+ cfg.RepSystemCfg.HeartbeatSend = 10;
+ cfg.RepSystemCfg.LeaseTimeout = 1300;
+ cfg.RepSystemCfg.NoAutoInit = false;
+ cfg.RepSystemCfg.NoBlocking = false;
+ cfg.RepSystemCfg.RepMgrAckPolicy =
+ AckPolicy.ALL_PEERS;
+ cfg.RepSystemCfg.RetransmissionRequest(10, 100);
+ cfg.RepSystemCfg.Strict2Site = true;
+ cfg.RepSystemCfg.UseMasterLeases = false;
+ cfg.EventNotify = new EventNotifyDelegate(stuffHappened);
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, cfg);
+
+ // Get initial replication stats.
+ ReplicationStats repStats = env.ReplicationSystemStats();
+ env.PrintReplicationSystemStats();
+ Assert.AreEqual(100, repStats.EnvPriority);
+ Assert.AreEqual(1,
+ repStats.CurrentElectionGenerationNumber);
+ Assert.AreEqual(0, repStats.CurrentGenerationNumber);
+ Assert.AreEqual(0, repStats.AppliedTransactions);
+
+ // Start a master site with replication manager.
+ env.RepMgrStartMaster(3);
+
+ // Open a btree database and write some data.
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.AutoCommit = true;
+ dbConfig.Env = env;
+ dbConfig.PageSize = 512;
+ BTreeDatabase db = BTreeDatabase.Open(dbName,
+ dbConfig);
+ for (int i = 0; i < 5; i++)
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)));
+
+ Console.WriteLine(
+ "Master: Finished initialization and data#1.");
+
+ // Client site could enter now.
+ clientStartSignal.Set();
+ Console.WriteLine(
+ "Master: Wait for Client to join and get #1.");
+
+ Console.WriteLine("...");
+
+ // Put some new data into master site.
+ for (int i = 10; i < 15; i++)
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)));
+ Console.WriteLine(
+ "Master: Write something new, data #2.");
+ Console.WriteLine("Master: Wait for client to read #2...");
+
+ // Get the stats.
+ repStats = env.ReplicationSystemStats(true);
+ env.PrintReplicationSystemStats();
+ Assert.LessOrEqual(0, repStats.AppliedTransactions);
+ Assert.LessOrEqual(0, repStats.AwaitedLSN.LogFileNumber);
+ Assert.LessOrEqual(0, repStats.AwaitedLSN.Offset);
+ Assert.LessOrEqual(0, repStats.AwaitedPage);
+ Assert.LessOrEqual(0, repStats.BadGenerationMessages);
+ Assert.LessOrEqual(0, repStats.BulkBufferFills);
+ Assert.LessOrEqual(0, repStats.BulkBufferOverflows);
+ Assert.LessOrEqual(0, repStats.BulkBufferTransfers);
+ Assert.LessOrEqual(0, repStats.BulkRecordsStored);
+ Assert.LessOrEqual(0, repStats.ClientServiceRequests);
+ Assert.LessOrEqual(0, repStats.ClientServiceRequestsMissing);
+ Assert.IsInstanceOfType(typeof(bool), repStats.ClientStartupComplete);
+ Assert.AreEqual(2, repStats.CurrentElectionGenerationNumber);
+ Assert.AreEqual(1, repStats.CurrentGenerationNumber);
+ Assert.LessOrEqual(0, repStats.CurrentQueuedLogRecords);
+ Assert.LessOrEqual(0, repStats.CurrentWinner);
+ Assert.LessOrEqual(0, repStats.CurrentWinnerMaxLSN.LogFileNumber);
+ Assert.LessOrEqual(0, repStats.CurrentWinnerMaxLSN.Offset);
+ Assert.LessOrEqual(0, repStats.DuplicateLogRecords);
+ Assert.LessOrEqual(0, repStats.DuplicatePages);
+ Assert.LessOrEqual(0, repStats.DupMasters);
+ Assert.LessOrEqual(0, repStats.ElectionGenerationNumber);
+ Assert.LessOrEqual(0, repStats.ElectionPriority);
+ Assert.LessOrEqual(0, repStats.Elections);
+ Assert.LessOrEqual(0, repStats.ElectionStatus);
+ Assert.LessOrEqual(0, repStats.ElectionsWon);
+ Assert.LessOrEqual(0, repStats.ElectionTiebreaker);
+ Assert.LessOrEqual(0, repStats.ElectionTimeSec);
+ Assert.LessOrEqual(0, repStats.ElectionTimeUSec);
+ Assert.AreEqual(repStats.EnvID, repStats.MasterEnvID);
+ Assert.LessOrEqual(0, repStats.EnvPriority);
+ Assert.LessOrEqual(0, repStats.FailedMessageSends);
+ Assert.LessOrEqual(0, repStats.ForcedRerequests);
+ Assert.LessOrEqual(0, repStats.IgnoredMessages);
+ Assert.LessOrEqual(0, repStats.MasterChanges);
+ Assert.LessOrEqual(0, repStats.MasterEnvID);
+ Assert.LessOrEqual(0, repStats.MaxLeaseSec);
+ Assert.LessOrEqual(0, repStats.MaxLeaseUSec);
+ Assert.LessOrEqual(0, repStats.MaxPermanentLSN.Offset);
+ Assert.LessOrEqual(0, repStats.MaxQueuedLogRecords);
+ Assert.LessOrEqual(0, repStats.MessagesSent);
+ Assert.LessOrEqual(0, repStats.MissedLogRecords);
+ Assert.LessOrEqual(0, repStats.MissedPages);
+ Assert.LessOrEqual(0, repStats.NewSiteMessages);
+ Assert.LessOrEqual(repStats.MaxPermanentLSN.LogFileNumber,
+ repStats.NextLSN.LogFileNumber);
+ if (repStats.MaxPermanentLSN.LogFileNumber ==
+ repStats.NextLSN.LogFileNumber)
+ Assert.Less(repStats.MaxPermanentLSN.Offset,
+ repStats.NextLSN.Offset);
+ Assert.LessOrEqual(0, repStats.NextPage);
+ Assert.LessOrEqual(0, repStats.Outdated);
+ Assert.LessOrEqual(0, repStats.QueuedLogRecords);
+ Assert.LessOrEqual(0, repStats.ReceivedLogRecords);
+ Assert.LessOrEqual(0, repStats.ReceivedMessages);
+ Assert.LessOrEqual(0, repStats.ReceivedPages);
+ Assert.LessOrEqual(0, repStats.RegisteredSites);
+ Assert.LessOrEqual(0, repStats.RegisteredSitesNeeded);
+ Assert.LessOrEqual(0, repStats.Sites);
+ Assert.LessOrEqual(0, repStats.StartSyncMessagesDelayed);
+ Assert.AreEqual(2, repStats.Status);
+ Assert.LessOrEqual(0, repStats.Throttled);
+ Assert.LessOrEqual(0, repStats.Votes);
+
+ // Get replication manager statistics.
+ RepMgrStats repMgrStats = env.RepMgrSystemStats(true);
+ Assert.LessOrEqual(0, repMgrStats.DroppedConnections);
+ Assert.LessOrEqual(0, repMgrStats.DroppedMessages);
+ Assert.LessOrEqual(0, repMgrStats.FailedConnections);
+ Assert.LessOrEqual(0, repMgrStats.FailedMessages);
+ Assert.LessOrEqual(0, repMgrStats.QueuedMessages);
+
+ // Print them out.
+ env.PrintRepMgrSystemStats();
+
+ // Wait until client has finished reading.
+ masterCloseSignal.WaitOne();
+ Console.WriteLine("Master: Leave as well.");
+
+ // Close all.
+ db.Close(false);
+ env.LogFlush();
+ env.Close();
+ }
+
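+ // Client thread: join the replication group on port 6870, wait for the
+ // sync to complete, then read the data written by the master.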
+ public void Client()
+ {
+ string home = testHome + "/Client";
+ Configuration.ClearDir(home);
+
+ clientStartSignal.WaitOne();
+ Console.WriteLine("Client: Join the replication");
+
+ // Open a environment.
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.UseReplication = true;
+ cfg.MPoolSystemCfg = new MPoolConfig();
+ cfg.MPoolSystemCfg.CacheSize =
+ new CacheInfo(0, 20485760, 1);
+ cfg.UseLocking = true;
+ cfg.UseTxns = true;
+ cfg.UseMPool = true;
+ cfg.Create = true;
+ cfg.UseLogging = true;
+ cfg.RunRecovery = true;
+ cfg.TxnNoSync = true;
+ cfg.FreeThreaded = true;
+ cfg.LockTimeout = 50000;
+ cfg.RepSystemCfg = new ReplicationConfig();
+ cfg.RepSystemCfg.RepMgrLocalSite =
+ new ReplicationHostAddress("127.0.0.1", 6870);
+ cfg.RepSystemCfg.Priority = 10;
+ cfg.RepSystemCfg.AddRemoteSite(
+ new ReplicationHostAddress("127.0.0.1", 8870), false);
+ cfg.RepSystemCfg.NSites = 2;
+ cfg.EventNotify = new EventNotifyDelegate(stuffHappened);
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, cfg);
+
+ // Start a client site with replication manager.
+ env.RepMgrStartClient(3, false);
+
+ // Leave enough time to sync.
+ Thread.Sleep(20000);
+
+ // Open database.
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.NEVER;
+ dbConfig.AutoCommit = true;
+ dbConfig.Env = env;
+ dbConfig.PageSize = 512;
+ BTreeDatabase db = BTreeDatabase.Open("rep.db",
+ dbConfig);
+
+ // Read the data replicated from the master.
+ Console.WriteLine("Client: Start reading data #1.");
+ for (int i = 0; i < 5; i++)
+ db.GetBoth(new DatabaseEntry(
+ BitConverter.GetBytes(i)), new DatabaseEntry(
+ BitConverter.GetBytes(i)));
+
+ // Leave some time for the client to read new data from the master.
+ Thread.Sleep(20000);
+
+ /*
+ * Read the data. All data that exists at the master
+ * site should appear at the client site.
+ */
+ Console.WriteLine("Client: Start reading data #2.");
+ for (int i = 10; i < 15; i++)
+ db.GetBoth(new DatabaseEntry(
+ BitConverter.GetBytes(i)), new DatabaseEntry(
+ BitConverter.GetBytes(i)));
+
+ // Get the latest replication subsystem statistics.
+ ReplicationStats repStats = env.ReplicationSystemStats();
+ Assert.IsTrue(repStats.ClientStartupComplete);
+ Assert.AreEqual(1, repStats.DuplicateLogRecords);
+ Assert.LessOrEqual(0, repStats.EnvID);
+ Assert.LessOrEqual(0, repStats.NextPage);
+ Assert.LessOrEqual(0, repStats.ReceivedPages);
+ Assert.AreEqual(1, repStats.Status);
+
+ // Close all.
+ db.Close(false);
+ env.LogFlush();
+ env.Close();
+ Console.WriteLine(
+ "Client: All data is read. Leaving the replication");
+
+ // The master closes only after the client has closed.
+ masterCloseSignal.Set();
+ }
+
+ private void stuffHappened(NotificationEvent eventCode, byte[] info)
+ {
+ switch (eventCode)
+ {
+ case NotificationEvent.REP_CLIENT:
+ Console.WriteLine("CLIENT");
+ break;
+ case NotificationEvent.REP_MASTER:
+ Console.WriteLine("MASTER");
+ break;
+ case NotificationEvent.REP_NEWMASTER:
+ Console.WriteLine("NEWMASTER");
+ break;
+ case NotificationEvent.REP_STARTUPDONE:
+ /* We don't care about these */
+ break;
+ case NotificationEvent.REP_PERM_FAILED:
+ Console.WriteLine("Insufficient Acks.");
+ break;
+ default:
+ Console.WriteLine("Event: {0}", eventCode);
+ break;
+ }
+ }
+
+ [Test]
+ public void TestElection()
+ {
+ testName = "TestElection";
+ testHome = testFixtureHome + "/" + testName;
+
+ client1StartSignal = new AutoResetEvent(false);
+ client2StartSignal = new AutoResetEvent(false);
+ client1ReadySignal = new AutoResetEvent(false);
+ client2ReadySignal = new AutoResetEvent(false);
+ client3StartSignal = new AutoResetEvent(false);
+ client3ReadySignal = new AutoResetEvent(false);
+ masterLeaveSignal = new AutoResetEvent(false);
+
+ Thread thread1 = new Thread(
+ new ThreadStart(UnstableMaster));
+ Thread thread2 = new Thread(
+ new ThreadStart(StableClient1));
+ Thread thread3 = new Thread(
+ new ThreadStart(StableClient2));
+ Thread thread4 = new Thread(
+ new ThreadStart(StableClient3));
+
+ thread1.Start();
+ Thread.Sleep(1000);
+ thread2.Start();
+ thread3.Start();
+ thread4.Start();
+
+ thread4.Join();
+ thread3.Join();
+ thread2.Join();
+ thread1.Join();
+
+ client1StartSignal.Close();
+ client2StartSignal.Close();
+ client1ReadySignal.Close();
+ client2ReadySignal.Close();
+ client3ReadySignal.Close();
+ client3StartSignal.Close();
+ masterLeaveSignal.Close();
+ }
+
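+ // Master thread for the election test: start on port 8888, wait until
+ // all clients are ready, then close unexpectedly to force an election.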
+ public void UnstableMaster()
+ {
+ string home = testHome + "/UnstableMaster";
+ Configuration.ClearDir(home);
+
+ // Open environment with replication configuration.
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.UseReplication = true;
+ cfg.MPoolSystemCfg = new MPoolConfig();
+ cfg.MPoolSystemCfg.CacheSize =
+ new CacheInfo(0, 20485760, 1);
+ cfg.UseLocking = true;
+ cfg.UseTxns = true;
+ cfg.UseMPool = true;
+ cfg.Create = true;
+ cfg.UseLogging = true;
+ cfg.RunRecovery = true;
+ cfg.TxnNoSync = true;
+ cfg.FreeThreaded = true;
+ cfg.RepSystemCfg = new ReplicationConfig();
+ cfg.RepSystemCfg.RepMgrLocalSite =
+ new ReplicationHostAddress("127.0.0.1", 8888);
+ cfg.RepSystemCfg.Priority = 200;
+ cfg.RepSystemCfg.NSites = 4;
+ cfg.RepSystemCfg.ElectionRetry = 10;
+ cfg.RepSystemCfg.RepMgrAckPolicy = AckPolicy.ALL;
+ cfg.EventNotify = new EventNotifyDelegate(stuffHappened);
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, cfg);
+ env.DeadlockResolution = DeadlockPolicy.DEFAULT;
+
+ // Start as master site.
+ env.RepMgrStartMaster(3);
+
+ Console.WriteLine("Master: Finish initialization");
+
+ // Notify clients to join.
+ client1StartSignal.Set();
+ client2StartSignal.Set();
+ client3StartSignal.Set();
+
+ // Wait for initialization of all clients.
+ client1ReadySignal.WaitOne();
+ client2ReadySignal.WaitOne();
+ client3ReadySignal.WaitOne();
+
+ List<uint> ports = new List<uint>();
+ ports.Add(5888);
+ ports.Add(6888);
+ ports.Add(7888);
+ foreach (RepMgrSite site in env.RepMgrRemoteSites)
+ {
+ Assert.AreEqual("127.0.0.1", site.Address.Host);
+ Assert.IsTrue(ports.Contains(site.Address.Port));
+ Assert.Greater(4, site.EId);
+ Assert.IsTrue(site.isConnected);
+ }
+
+ // After all of them are ready, close the current master.
+ Console.WriteLine("Master: Unexpected leave.");
+ env.LogFlush();
+ env.Close();
+ masterLeaveSignal.Set();
+ }
+
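+ // Client on port 7888 with priority 10; joins the group but does not
+ // call elections itself.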
+ public void StableClient1()
+ {
+ string home = testHome + "/StableClient1";
+ Configuration.ClearDir(home);
+
+ // Get notification from master and start the #1 client.
+ client1StartSignal.WaitOne();
+ Console.WriteLine("Client1: Join the replication");
+
+ // Open the environment.
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.UseReplication = true;
+ cfg.MPoolSystemCfg = new MPoolConfig();
+ cfg.MPoolSystemCfg.CacheSize =
+ new CacheInfo(0, 20485760, 1);
+ cfg.UseLocking = true;
+ cfg.UseTxns = true;
+ cfg.UseMPool = true;
+ cfg.Create = true;
+ cfg.UseLogging = true;
+ cfg.RunRecovery = true;
+ cfg.TxnNoSync = true;
+ cfg.FreeThreaded = true;
+ cfg.LockTimeout = 50000;
+ cfg.RepSystemCfg = new ReplicationConfig();
+ cfg.RepSystemCfg.RepMgrLocalSite =
+ new ReplicationHostAddress("127.0.0.1", 7888);
+ cfg.RepSystemCfg.Priority = 10;
+ cfg.RepSystemCfg.AddRemoteSite(
+ new ReplicationHostAddress("127.0.0.1", 8888), false);
+ cfg.RepSystemCfg.AddRemoteSite(
+ new ReplicationHostAddress("127.0.0.1", 5888), true);
+ cfg.RepSystemCfg.NSites = 4;
+ cfg.RepSystemCfg.ElectionRetry = 10;
+ cfg.RepSystemCfg.RepMgrAckPolicy = AckPolicy.NONE;
+ cfg.EventNotify = new EventNotifyDelegate(stuffHappened);
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, cfg);
+ env.DeadlockResolution = DeadlockPolicy.DEFAULT;
+
+ // Start the client who won't raise any election.
+ env.RepMgrStartClient(3, false);
+
+ // Leave enough time to sync.
+ Thread.Sleep(20000);
+
+ // The current client site is fully initialized.
+ client1ReadySignal.Set();
+
+ // Wait for master's leave signal.
+ masterLeaveSignal.WaitOne();
+
+ /*
+ * Set the master's leave signal so that other clients
+ * could be informed.
+ */
+ masterLeaveSignal.Set();
+
+ // Leave some time for the clients to hold an election.
+ Thread.Sleep(10000);
+
+ env.LogFlush();
+ env.Close();
+ Console.WriteLine("Client1: Leaving the replication");
+ }
+
+
+ public void StableClient2()
+ {
+ string home = testHome + "/StableClient2";
+ Configuration.ClearDir(home);
+
+ client2StartSignal.WaitOne();
+ Console.WriteLine("Client2: Join the replication");
+
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.UseReplication = true;
+ cfg.MPoolSystemCfg = new MPoolConfig();
+ cfg.MPoolSystemCfg.CacheSize =
+ new CacheInfo(0, 20485760, 1);
+ cfg.UseLocking = true;
+ cfg.UseTxns = true;
+ cfg.UseMPool = true;
+ cfg.Create = true;
+ cfg.UseLogging = true;
+ cfg.RunRecovery = true;
+ cfg.TxnNoSync = true;
+ cfg.FreeThreaded = true;
+ cfg.LockTimeout = 50000;
+ cfg.RepSystemCfg = new ReplicationConfig();
+ cfg.RepSystemCfg.RepMgrLocalSite =
+ new ReplicationHostAddress("127.0.0.1", 6888);
+ cfg.RepSystemCfg.Priority = 20;
+ cfg.RepSystemCfg.AddRemoteSite(
+ new ReplicationHostAddress("127.0.0.1", 8888), false);
+ cfg.RepSystemCfg.AddRemoteSite(
+ new ReplicationHostAddress("127.0.0.1", 7888), true);
+ cfg.RepSystemCfg.NSites = 4;
+ cfg.RepSystemCfg.ElectionRetry = 10;
+ cfg.RepSystemCfg.RepMgrAckPolicy =
+ AckPolicy.ONE_PEER;
+ cfg.EventNotify = new EventNotifyDelegate(stuffHappened);
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, cfg);
+ env.DeadlockResolution = DeadlockPolicy.DEFAULT;
+
+ // Start the client who will raise election if no master.
+ env.RepMgrStartClient(3, true);
+
+ // Leave enough time to sync.
+ Thread.Sleep(20000);
+
+ // The current client site is fully initialized.
+ client2ReadySignal.Set();
+
+ // Wait for master's leave signal.
+ masterLeaveSignal.WaitOne();
+
+ /*
+ * Set the master's leave signal so that other clients
+ * could be informed.
+ */
+ masterLeaveSignal.Set();
+
+ // Leave some time for the clients to hold an election.
+ Thread.Sleep(5000);
+
+ env.LogFlush();
+ env.Close();
+ Console.WriteLine("Client2: Leaving the replication");
+ }
+
+ public void StableClient3()
+ {
+ string home = testHome + "/StableClient3";
+ Configuration.ClearDir(home);
+
+ client3StartSignal.WaitOne();
+ Console.WriteLine("Client3: Join the replication");
+
+ DatabaseEnvironmentConfig cfg =
+ new DatabaseEnvironmentConfig();
+ cfg.UseReplication = true;
+ cfg.MPoolSystemCfg = new MPoolConfig();
+ cfg.MPoolSystemCfg.CacheSize =
+ new CacheInfo(0, 20485760, 1);
+ cfg.UseLocking = true;
+ cfg.UseTxns = true;
+ cfg.UseMPool = true;
+ cfg.Create = true;
+ cfg.UseLogging = true;
+ cfg.RunRecovery = true;
+ cfg.TxnNoSync = true;
+ cfg.FreeThreaded = true;
+ cfg.LockTimeout = 50000;
+ cfg.RepSystemCfg = new ReplicationConfig();
+ cfg.RepSystemCfg.RepMgrLocalSite =
+ new ReplicationHostAddress("127.0.0.1", 5888);
+ cfg.RepSystemCfg.Priority = 80;
+ cfg.RepSystemCfg.AddRemoteSite(
+ new ReplicationHostAddress("127.0.0.1", 8888), false);
+ cfg.RepSystemCfg.AddRemoteSite(
+ new ReplicationHostAddress("127.0.0.1", 6888), true);
+ cfg.RepSystemCfg.NSites = 4;
+ cfg.EventNotify = new EventNotifyDelegate(stuffHappened);
+ cfg.RepSystemCfg.ElectionRetry = 10;
+ cfg.RepSystemCfg.RepMgrAckPolicy = AckPolicy.QUORUM;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, cfg);
+ env.DeadlockResolution = DeadlockPolicy.DEFAULT;
+
+ env.RepMgrStartClient(3, false);
+
+ // Leave enough time to sync with master.
+ Thread.Sleep(20000);
+
+ // The current client site is fully initialized.
+ client3ReadySignal.Set();
+
+ // Wait for master's leave signal.
+ masterLeaveSignal.WaitOne();
+
+			/*
+			 * Set the master's leave signal again so that the
+			 * other clients are informed as well.
+			 */
+ masterLeaveSignal.Set();
+
+			/*
+			 * The master leaves the replication group after all clients
+			 * have initialized. Leave some time for the master to leave
+			 * and for the clients to elect a new master.
+			 */
+ Thread.Sleep(5000);
+
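+			// Sanity-check the replication statistics gathered after the election.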
+ ReplicationStats repStats = env.ReplicationSystemStats();
+ Assert.LessOrEqual(0, repStats.Elections);
+ Assert.LessOrEqual(0, repStats.ElectionTiebreaker);
+ Assert.LessOrEqual(0,
+ repStats.ElectionTimeSec + repStats.ElectionTimeUSec);
+ Assert.LessOrEqual(0, repStats.MasterChanges);
+ Assert.LessOrEqual(0, repStats.NewSiteMessages);
+ Assert.LessOrEqual(0, repStats.ReceivedLogRecords);
+ Assert.LessOrEqual(0, repStats.ReceivedMessages);
+ Assert.LessOrEqual(0, repStats.ReceivedPages);
+ Assert.GreaterOrEqual(4, repStats.RegisteredSitesNeeded);
+ Assert.LessOrEqual(0, repStats.Sites);
+
+			/*
+			 * Client 3 will be the new master. The elected master should
+			 * wait until all of the other clients have left.
+			 */
+ Thread.Sleep(10000);
+
+ env.LogFlush();
+ env.Close();
+ Console.WriteLine("Client3: Leaving the replication");
+ }
+
+ [Test]
+ public void TestAckPolicy()
+ {
+ testName = "TestAckPolicy";
+ testHome = testFixtureHome + "/" + testName;
+
+ SetRepMgrAckPolicy(testHome + "_ALL", AckPolicy.ALL);
+ SetRepMgrAckPolicy(testHome + "_ALL_PEERS",
+ AckPolicy.ALL_PEERS);
+ SetRepMgrAckPolicy(testHome + "_NONE",
+ AckPolicy.NONE);
+ SetRepMgrAckPolicy(testHome + "_ONE",
+ AckPolicy.ONE);
+ SetRepMgrAckPolicy(testHome + "_ONE_PEER",
+ AckPolicy.ONE_PEER);
+ SetRepMgrAckPolicy(testHome + "_QUORUM",
+ AckPolicy.QUORUM);
+ SetRepMgrAckPolicy(testHome + "_NULL", null);
+ }
+
+ public void SetRepMgrAckPolicy(string home, AckPolicy policy)
+ {
+ Configuration.ClearDir(home);
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseLocking = true;
+ envConfig.UseLogging = true;
+ envConfig.UseMPool = true;
+ envConfig.UseReplication = true;
+ envConfig.UseTxns = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, envConfig);
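+			/*
+			 * A null policy leaves the acknowledgement policy unset here;
+			 * in that case just verify that the environment opens and
+			 * closes cleanly.
+			 */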
+ if (policy != null)
+ {
+ env.RepMgrAckPolicy = policy;
+ Assert.AreEqual(policy, env.RepMgrAckPolicy);
+ }
+ env.Close();
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/SecondaryBTreeDatabaseConfigTest.cs b/db-4.8.30/test/scr037/SecondaryBTreeDatabaseConfigTest.cs
new file mode 100644
index 0000000..09724af
--- /dev/null
+++ b/db-4.8.30/test/scr037/SecondaryBTreeDatabaseConfigTest.cs
@@ -0,0 +1,115 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class SecondaryBTreeDatabaseConfigTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "SecondaryBTreeDatabaseConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestConfig()
+ {
+ testName = "TestConfig";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+
+ // Open a primary btree database.
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ dbFileName, btreeDBConfig);
+
+ SecondaryBTreeDatabaseConfig secDBConfig =
+ new SecondaryBTreeDatabaseConfig(btreeDB, null);
+
+ Config(xmlElem, ref secDBConfig, true);
+ Confirm(xmlElem, secDBConfig, true);
+
+ // Close the primary btree database.
+ btreeDB.Close();
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ SecondaryBTreeDatabaseConfig secBtreeDBConfig,
+ bool compulsory)
+ {
+ SecondaryDatabaseConfig secDBConfig =
+ secBtreeDBConfig;
+ SecondaryDatabaseConfigTest.Confirm(xmlElement,
+ secDBConfig, compulsory);
+
+			// Confirm configuration specific to the secondary btree database.
+ Configuration.ConfirmCreatePolicy(xmlElement,
+ "Creation", secBtreeDBConfig.Creation, compulsory);
+ Configuration.ConfirmDuplicatesPolicy(xmlElement,
+ "Duplicates", secBtreeDBConfig.Duplicates, compulsory);
+ Configuration.ConfirmUint(xmlElement, "MinKeysPerPage",
+ secBtreeDBConfig.MinKeysPerPage, compulsory);
+ Configuration.ConfirmBool(xmlElement,
+ "NoReverseSplitting",
+ secBtreeDBConfig.NoReverseSplitting, compulsory);
+ Configuration.ConfirmBool(xmlElement,
+ "UseRecordNumbers",
+ secBtreeDBConfig.UseRecordNumbers,
+ compulsory);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref SecondaryBTreeDatabaseConfig secBtreeDBConfig,
+ bool compulsory)
+ {
+ uint minKeysPerPage = new uint();
+
+ SecondaryDatabaseConfig secDBConfig = secBtreeDBConfig;
+ SecondaryDatabaseConfigTest.Config(xmlElement,
+ ref secDBConfig, compulsory);
+
+			// Configure fields/properties specific to the secondary btree database.
+ Configuration.ConfigCreatePolicy(xmlElement,
+ "Creation", ref secBtreeDBConfig.Creation, compulsory);
+ Configuration.ConfigDuplicatesPolicy(xmlElement,
+ "Duplicates", ref secBtreeDBConfig.Duplicates,
+ compulsory);
+ if (Configuration.ConfigUint(xmlElement,
+ "MinKeysPerPage", ref minKeysPerPage, compulsory))
+ secBtreeDBConfig.MinKeysPerPage = minKeysPerPage;
+ Configuration.ConfigBool(xmlElement,
+ "NoReverseSplitting",
+ ref secBtreeDBConfig.NoReverseSplitting, compulsory);
+ Configuration.ConfigBool(xmlElement,
+ "UseRecordNumbers",
+ ref secBtreeDBConfig.UseRecordNumbers, compulsory);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/SecondaryBTreeDatabaseTest.cs b/db-4.8.30/test/scr037/SecondaryBTreeDatabaseTest.cs
new file mode 100644
index 0000000..5c1fdfb
--- /dev/null
+++ b/db-4.8.30/test/scr037/SecondaryBTreeDatabaseTest.cs
@@ -0,0 +1,232 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class SecondaryBTreeDatabaseTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "SecondaryBTreeDatabaseTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestOpen()
+ {
+ testName = "TestOpen";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" +
+ testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+
+ // Open a primary btree database.
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ dbFileName, btreeDBConfig);
+
+ // Open a secondary btree database.
+ SecondaryBTreeDatabaseConfig secDBConfig =
+ new SecondaryBTreeDatabaseConfig(btreeDB, null);
+
+ SecondaryBTreeDatabaseConfigTest.Config(xmlElem,
+ ref secDBConfig, true);
+ SecondaryBTreeDatabase secDB =
+ SecondaryBTreeDatabase.Open(dbSecFileName,
+ secDBConfig);
+
+			// Confirm the flags configured in secDBConfig.
+ Confirm(xmlElem, secDB, true);
+
+ secDB.Close();
+ btreeDB.Close();
+ }
+
+ [Test]
+ public void TestCompare()
+ {
+ testName = "TestCompare";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a primary btree database.
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ dbFileName, btreeDBConfig);
+
+ // Open a secondary btree database.
+ SecondaryBTreeDatabaseConfig secBtreeDBConfig =
+ new SecondaryBTreeDatabaseConfig(null, null);
+ secBtreeDBConfig.Primary = btreeDB;
+ secBtreeDBConfig.Compare =
+ new EntryComparisonDelegate(
+ SecondaryEntryComparison);
+ secBtreeDBConfig.KeyGen =
+ new SecondaryKeyGenDelegate(SecondaryKeyGen);
+ SecondaryBTreeDatabase secDB =
+ SecondaryBTreeDatabase.Open(
+ dbFileName, secBtreeDBConfig);
+
+			/*
+			 * Get the compare function set in the configuration
+			 * and run a comparison to verify that it works.
+			 */
+ EntryComparisonDelegate cmp =
+ secDB.Compare;
+ DatabaseEntry dbt1, dbt2;
+ dbt1 = new DatabaseEntry(
+ BitConverter.GetBytes((int)257));
+ dbt2 = new DatabaseEntry(
+ BitConverter.GetBytes((int)255));
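+			// SecondaryEntryComparison returns (a - b), so 257 vs. 255 is positive.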
+ Assert.Less(0, cmp(dbt1, dbt2));
+
+ secDB.Close();
+ btreeDB.Close();
+ }
+
+ [Test]
+ public void TestDuplicates()
+ {
+ testName = "TestDuplicates";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName
+ + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a primary btree database.
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ dbFileName, btreeDBConfig);
+
+ // Open a secondary btree database.
+ SecondaryBTreeDatabaseConfig secBtreeDBConfig =
+ new SecondaryBTreeDatabaseConfig(btreeDB, null);
+ secBtreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ secBtreeDBConfig.Duplicates = DuplicatesPolicy.SORTED;
+ SecondaryBTreeDatabase secDB =
+ SecondaryBTreeDatabase.Open(dbSecFileName,
+ secBtreeDBConfig);
+
+ secDB.Close();
+ btreeDB.Close();
+ }
+
+ [Test]
+ public void TestPrefixCompare()
+ {
+ testName = "TestPrefixCompare";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a primary btree database.
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.ALWAYS;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ dbFileName, btreeDBConfig);
+
+ // Open a secondary btree database.
+ SecondaryBTreeDatabaseConfig secBtreeDBConfig =
+ new SecondaryBTreeDatabaseConfig(btreeDB, null);
+ secBtreeDBConfig.Primary = btreeDB;
+ secBtreeDBConfig.Compare =
+ new EntryComparisonDelegate(
+ SecondaryEntryComparison);
+ secBtreeDBConfig.PrefixCompare =
+ new EntryComparisonDelegate(
+ SecondaryEntryComparison);
+ secBtreeDBConfig.KeyGen =
+ new SecondaryKeyGenDelegate(
+ SecondaryKeyGen);
+ SecondaryBTreeDatabase secDB =
+ SecondaryBTreeDatabase.Open(
+ dbFileName, secBtreeDBConfig);
+
+			/*
+			 * Get the prefix compare function set in the
+			 * configuration and run a comparison to verify
+			 * that it works.
+			 */
+ EntryComparisonDelegate cmp =
+ secDB.PrefixCompare;
+ DatabaseEntry dbt1, dbt2;
+ dbt1 = new DatabaseEntry(
+ BitConverter.GetBytes((int)1));
+ dbt2 = new DatabaseEntry(
+ BitConverter.GetBytes((int)129));
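+			// SecondaryEntryComparison returns (a - b), so 1 vs. 129 is negative.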
+ Assert.Greater(0, cmp(dbt1, dbt2));
+
+ secDB.Close();
+ btreeDB.Close();
+ }
+
+ public int SecondaryEntryComparison(
+ DatabaseEntry dbt1, DatabaseEntry dbt2)
+ {
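+			// Compare the entries as 32-bit integers; the result is
+			// positive when dbt1 is greater than dbt2.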
+ int a, b;
+ a = BitConverter.ToInt32(dbt1.Data, 0);
+ b = BitConverter.ToInt32(dbt2.Data, 0);
+ return a - b;
+ }
+
+ public DatabaseEntry SecondaryKeyGen(
+ DatabaseEntry key, DatabaseEntry data)
+ {
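+			// Use the primary record's data as the secondary key.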
+ DatabaseEntry dbtGen;
+ dbtGen = new DatabaseEntry(data.Data);
+ return dbtGen;
+ }
+
+ public static void Confirm(XmlElement xmlElem,
+ SecondaryBTreeDatabase secDB, bool compulsory)
+ {
+ Configuration.ConfirmDuplicatesPolicy(xmlElem,
+ "Duplicates", secDB.Duplicates, compulsory);
+ Configuration.ConfirmUint(xmlElem, "MinKeysPerPage",
+ secDB.MinKeysPerPage, compulsory);
+ Configuration.ConfirmBool(xmlElem, "NoReverseSplitting",
+ secDB.ReverseSplit, compulsory);
+ Configuration.ConfirmBool(xmlElem, "UseRecordNumbers",
+ secDB.RecordNumbers, compulsory);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/SecondaryCursorTest.cs b/db-4.8.30/test/scr037/SecondaryCursorTest.cs
new file mode 100644
index 0000000..1c3ca6d
--- /dev/null
+++ b/db-4.8.30/test/scr037/SecondaryCursorTest.cs
@@ -0,0 +1,1214 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class SecondaryCursorTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "SecondaryCursorTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+			/*
+			 * Delete the existing test output directory and files for
+			 * the current test fixture, then create a new one.
+			 */
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestDuplicate()
+ {
+ testName = "TestDuplicate";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a primary database.
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase db = BTreeDatabase.Open(
+ dbFileName, dbConfig);
+
+ // Open a secondary database.
+ SecondaryBTreeDatabaseConfig secConfig =
+ new SecondaryBTreeDatabaseConfig(db,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ secConfig.Creation = CreatePolicy.IF_NEEDED;
+ secConfig.Duplicates = DuplicatesPolicy.UNSORTED;
+ SecondaryBTreeDatabase secDB =
+ SecondaryBTreeDatabase.Open(dbSecFileName,
+ secConfig);
+
+			// Put a key/data pair into the database.
+ DatabaseEntry key, data;
+ key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+ db.Put(key, data);
+
+ // Create a cursor.
+ SecondaryCursor cursor = secDB.SecondaryCursor();
+ cursor.Move(key, true);
+
+ // Duplicate the cursor.
+ SecondaryCursor dupCursor;
+ dupCursor = cursor.Duplicate(true);
+
+ /*
+ * Confirm that the duplicate cursor has the same
+ * position as the original one.
+ */
+ Assert.AreEqual(cursor.Current.Key,
+ dupCursor.Current.Key);
+ Assert.AreEqual(cursor.Current.Value,
+ dupCursor.Current.Value);
+
+ // Close the cursor and the duplicate cursor.
+ dupCursor.Close();
+ cursor.Close();
+
+ // Close secondary and primary database.
+ secDB.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestDelete()
+ {
+ testName = "TestDelete";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a primary database and its secondary database.
+ BTreeDatabase db;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDB(dbFileName, dbSecFileName, out db, out secDB);
+
+			// Put a key/data pair into the database.
+ DatabaseEntry key, data;
+ key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+ db.Put(key, data);
+
+			// Delete the pair with the secondary cursor.
+ SecondaryCursor secCursor = secDB.SecondaryCursor();
+ Assert.IsTrue(secCursor.MoveFirst());
+ secCursor.Delete();
+
+ // Confirm that the pair is deleted.
+ Assert.IsFalse(db.Exists(key));
+
+ // Close all databases.
+ secDB.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestCurrent()
+ {
+ testName = "TestCurrent";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a primary database and its secondary database.
+ BTreeDatabase db;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDB(dbFileName, dbSecFileName, out db, out secDB);
+
+			// Put a key/data pair into the database.
+ DatabaseEntry key, data;
+ key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+ db.Put(key, data);
+
+			// Move the secondary cursor to the record.
+ SecondaryCursor secCursor = secDB.SecondaryCursor();
+ Assert.IsTrue(secCursor.Move(data, true));
+
+			// Confirm that the current record is the one we put into the database.
+ Assert.AreEqual(data.Data, secCursor.Current.Key.Data);
+
+ // Close all databases.
+ secDB.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestGetEnumerator()
+ {
+ testName = "TestGetEnumerator";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a primary database and its secondary database.
+ BTreeDatabase db;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDB(dbFileName, dbSecFileName, out db,
+ out secDB);
+
+ // Write ten records into the database.
+ WriteRecords(db);
+
+			/*
+			 * Iterate over all records in the secondary database and,
+			 * for records whose secondary key is not 10, check that
+			 * the primary key equals the primary data.
+			 */
+ SecondaryCursor secCursor = secDB.SecondaryCursor();
+ foreach (KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>> secData in secCursor)
+ {
+ if (BitConverter.ToInt32(secData.Key.Data, 0) != 10)
+ Assert.AreEqual(secData.Value.Key.Data,
+ secData.Value.Value.Data);
+ }
+
+ // Close all cursors and databases.
+ secCursor.Close();
+ secDB.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveToKey()
+ {
+ testName = "TestMoveToKey";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ MoveToPos(dbFileName, dbSecFileName, false);
+ }
+
+ [Test]
+ public void TestMoveToPair()
+ {
+ testName = "TestMoveToPair";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ MoveToPos(dbFileName, dbSecFileName, true);
+ }
+
+ public void MoveToPos(string dbFileName,
+ string dbSecFileName, bool ifPair)
+ {
+ // Open a primary database and its secondary database.
+ BTreeDatabase db;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDB(dbFileName, dbSecFileName, out db,
+ out secDB);
+
+ // Write ten records into the database.
+ WriteRecords(db);
+
+ SecondaryCursor secCursor = secDB.SecondaryCursor();
+ DatabaseEntry key = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ DatabaseEntry notExistingKey = new DatabaseEntry(
+ BitConverter.GetBytes((int)100));
+ if (ifPair == false)
+ {
+ Assert.IsTrue(secCursor.Move(key, true));
+ Assert.IsFalse(secCursor.Move(notExistingKey, true));
+ }
+ else
+ {
+ KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>> pair =
+ new KeyValuePair<DatabaseEntry,
+ KeyValuePair<DatabaseEntry, DatabaseEntry>>(key,
+ new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ key, key));
+
+ KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>> notExistingPair;
+
+ notExistingPair = new KeyValuePair<DatabaseEntry,
+ KeyValuePair<DatabaseEntry, DatabaseEntry>>(
+ notExistingKey, new KeyValuePair<
+ DatabaseEntry, DatabaseEntry>(
+ notExistingKey, notExistingKey));
+ Assert.IsTrue(secCursor.Move(pair, true));
+ Assert.IsFalse(secCursor.Move(notExistingPair, true));
+ }
+
+ secCursor.Close();
+ secDB.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveToKeyWithLockingInfo()
+ {
+ testName = "TestMoveToKeyWithLockingInfo";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ MoveToPosWithLockingInfo(testHome, dbFileName,
+ dbSecFileName, false);
+ }
+
+ [Test]
+ public void TestMoveToPairWithLockingInfo()
+ {
+ testName = "TestMoveToPairWithLockingInfo";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ MoveToPosWithLockingInfo(testHome, dbFileName,
+ dbSecFileName, true);
+ }
+
+ public void MoveToPosWithLockingInfo(string home,
+ string dbFileName, string dbSecFileName, bool ifPair)
+ {
+ // Open a primary database and its secondary database.
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDBInTxn(home, dbFileName, dbSecFileName,
+ out env, out db, out secDB);
+
+ // Write ten records into the database.
+ WriteRecordsInTxn(db, env);
+
+			// Create a secondary cursor.
+ Transaction cursorTxn = env.BeginTransaction();
+ SecondaryCursor secCursor = secDB.SecondaryCursor(
+ cursorTxn);
+ DatabaseEntry key = new DatabaseEntry(
+ BitConverter.GetBytes((int)0));
+ LockingInfo lockingInfo = new LockingInfo();
+ lockingInfo.IsolationDegree = Isolation.DEGREE_THREE;
+ lockingInfo.ReadModifyWrite = true;
+ if (ifPair == false)
+ {
+ Assert.IsTrue(secCursor.Move(key, true, lockingInfo));
+ }
+ else
+ {
+ KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>> pair;
+
+ pair = new KeyValuePair<DatabaseEntry,
+ KeyValuePair<DatabaseEntry, DatabaseEntry>>(key,
+ new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ key, key));
+ Assert.IsTrue(secCursor.Move(pair, true, lockingInfo));
+ }
+ secCursor.Close();
+ cursorTxn.Commit();
+
+ secDB.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestMoveFirst()
+ {
+ testName = "TestMoveFirst";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open primary and secondary database.
+ BTreeDatabase db;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDB(dbFileName, dbSecFileName, out db,
+ out secDB);
+
+
+ // Write ten records into the database.
+ WriteRecords(db);
+
+			// Move the cursor to the first record (0, 0).
+ SecondaryCursor cursor = secDB.SecondaryCursor();
+ Assert.IsTrue(cursor.MoveFirst());
+ Assert.AreEqual(BitConverter.GetBytes((int)0),
+ cursor.Current.Key.Data);
+
+ // Close all.
+ cursor.Close();
+ secDB.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveLast()
+ {
+ testName = "TestMoveLast";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabase db;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDB(dbFileName, dbSecFileName, out db,
+ out secDB);
+ WriteRecords(db);
+
+ SecondaryCursor cursor = secDB.SecondaryCursor();
+ Assert.IsTrue(cursor.MoveLast());
+ Assert.AreEqual(BitConverter.GetBytes((int)10),
+ cursor.Current.Key.Data);
+
+ cursor.Close();
+ secDB.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveNext()
+ {
+ testName = "TestMoveNext";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabase db;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDB(dbFileName, dbSecFileName, out db,
+ out secDB);
+ WriteRecords(db);
+
+ SecondaryCursor cursor = secDB.SecondaryCursor();
+ cursor.MoveFirst();
+ for (int i = 0; i < 5; i++)
+ Assert.IsTrue(cursor.MoveNext());
+
+ cursor.Close();
+ secDB.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveNextDuplicate()
+ {
+ testName = "TestMoveNextDuplicate";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabase db;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDB(dbFileName, dbSecFileName, out db,
+ out secDB);
+ WriteRecords(db);
+
+			// Create a cursor and move it to the duplicate record.
+ SecondaryCursor cursor = secDB.SecondaryCursor();
+ cursor.Move(new DatabaseEntry(
+ BitConverter.GetBytes((int)10)), true);
+ Assert.IsTrue(cursor.MoveNextDuplicate());
+ Assert.AreEqual(BitConverter.GetBytes((int)10),
+ cursor.Current.Key.Data);
+
+ cursor.Close();
+ secDB.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveNextUnique()
+ {
+ testName = "TestMoveNextUnique";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabase db;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDB(dbFileName, dbSecFileName, out db,
+ out secDB);
+ WriteRecords(db);
+
+			/*
+			 * Move the cursor to the duplicate record. Since the
+			 * duplicate record has the largest key, moving to the
+			 * next unique record should fail.
+			 */
+ SecondaryCursor cursor = secDB.SecondaryCursor();
+ cursor.Move(new DatabaseEntry(
+ BitConverter.GetBytes((int)10)), true);
+ Assert.IsFalse(cursor.MoveNextUnique());
+
+ cursor.Close();
+ secDB.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMovePrev()
+ {
+ testName = "TestMovePrev";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabase db;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDB(dbFileName, dbSecFileName, out db,
+ out secDB);
+ WriteRecords(db);
+
+ SecondaryCursor cursor = secDB.SecondaryCursor();
+ cursor.MoveLast();
+ for (int i = 0; i < 5; i++)
+ Assert.IsTrue(cursor.MovePrev());
+
+ cursor.Close();
+ secDB.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMovePrevDuplicate()
+ {
+ testName = "TestMovePrevDuplicate";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabase db;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDB(dbFileName, dbSecFileName, out db,
+ out secDB);
+ WriteRecords(db);
+
+ SecondaryCursor cursor = secDB.SecondaryCursor();
+ KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>> pair;
+ DatabaseEntry pKey, pData;
+ pKey = new DatabaseEntry(BitConverter.GetBytes((int)6));
+ pData = new DatabaseEntry(BitConverter.GetBytes((int)10));
+ pair = new KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>>(pData,
+ new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ pKey, pData));
+ cursor.Move(pair, true);
+ Assert.IsTrue(cursor.MovePrevDuplicate());
+ Assert.AreEqual(BitConverter.GetBytes((int)10),
+ cursor.Current.Key.Data);
+
+ cursor.Close();
+ secDB.Close();
+ db.Close();
+ }
+
+
+ [Test]
+ public void TestMovePrevUnique()
+ {
+ testName = "TestMovePrevUnique";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabase db;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDB(dbFileName, dbSecFileName, out db,
+ out secDB);
+ WriteRecords(db);
+
+ SecondaryCursor cursor = secDB.SecondaryCursor();
+ KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>> pair;
+ DatabaseEntry pKey, pData;
+ pKey = new DatabaseEntry(BitConverter.GetBytes((int)6));
+ pData = new DatabaseEntry(BitConverter.GetBytes((int)10));
+ pair = new KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>>(pData,
+ new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ pKey, pData));
+ cursor.Move(pair, true);
+ Assert.IsTrue(cursor.MovePrevUnique());
+ Assert.AreNotEqual(BitConverter.GetBytes((int)10),
+ cursor.Current.Key.Data);
+
+ cursor.Close();
+ secDB.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestRefresh()
+ {
+ testName = "TestRefresh";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabase db;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDB(dbFileName, dbSecFileName, out db,
+ out secDB);
+ WriteRecords(db);
+
+ SecondaryCursor cursor = secDB.SecondaryCursor();
+ KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>> pair;
+ DatabaseEntry pKey, pData;
+ pKey = new DatabaseEntry(BitConverter.GetBytes((int)6));
+ pData = new DatabaseEntry(BitConverter.GetBytes((int)10));
+ pair = new KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>>(pData,
+ new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ pKey, pData));
+ cursor.Move(pair, true);
+ Assert.IsTrue(cursor.Refresh());
+ Assert.AreEqual(pData.Data, cursor.Current.Key.Data);
+ Assert.AreEqual(pKey.Data, cursor.Current.Value.Key.Data);
+
+ cursor.Close();
+ secDB.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestMoveFirstWithLockingInfo()
+ {
+ testName = "TestMoveFirstWithLockingInfo";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Open environment, primary database and
+ * secondary database.
+ */
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDBInTxn(testHome, dbFileName, dbSecFileName,
+ out env, out db, out secDB);
+
+ // Write ten records into the database.
+ WriteRecordsInTxn(db, env);
+
+			// Move the cursor to the first record (0, 0).
+ Transaction cursorTxn = env.BeginTransaction();
+ SecondaryCursor cursor =
+ secDB.SecondaryCursor(cursorTxn);
+ LockingInfo lockingInfo = new LockingInfo();
+ lockingInfo.IsolationDegree = Isolation.DEGREE_THREE;
+ lockingInfo.ReadModifyWrite = true;
+ Assert.IsTrue(cursor.MoveFirst(lockingInfo));
+ Assert.AreEqual(BitConverter.GetBytes((int)0),
+ cursor.Current.Key.Data);
+ cursor.Close();
+ cursorTxn.Commit();
+
+ // Close all.
+ secDB.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestMoveLastWithLockingInfo()
+ {
+ testName = "TestMoveLastWithLockingInfo";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Open environment, primary database and
+ * secondary database.
+ */
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDBInTxn(testHome, dbFileName, dbSecFileName,
+ out env, out db, out secDB);
+
+ // Write ten records into the database.
+ WriteRecordsInTxn(db, env);
+
+			/*
+			 * Move the cursor to the last record (10, 6), which is
+			 * record (6, 10) in the primary database.
+			 */
+ Transaction cursorTxn = env.BeginTransaction();
+ SecondaryCursor cursor =
+ secDB.SecondaryCursor(cursorTxn);
+ LockingInfo lockingInfo = new LockingInfo();
+ lockingInfo.IsolationDegree = Isolation.DEGREE_THREE;
+ lockingInfo.ReadModifyWrite = true;
+ Assert.IsTrue(cursor.MoveLast(lockingInfo));
+ Assert.AreEqual(BitConverter.GetBytes((int)10),
+ cursor.Current.Key.Data);
+ cursor.Close();
+ cursorTxn.Commit();
+
+ // Close all.
+ secDB.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestMoveNextWithLockingInfo()
+ {
+ testName = "TestMoveNextWithLockingInfo";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Open environment, primary database and
+ * secondary database.
+ */
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDBInTxn(testHome, dbFileName, dbSecFileName,
+ out env, out db, out secDB);
+
+ // Write ten records into the database.
+ WriteRecordsInTxn(db, env);
+
+			/*
+			 * Move the cursor to the first record, then move to the
+			 * next record five times.
+			 */
+ Transaction cursorTxn = env.BeginTransaction();
+ SecondaryCursor cursor =
+ secDB.SecondaryCursor(cursorTxn);
+ LockingInfo lockingInfo = new LockingInfo();
+ lockingInfo.IsolationDegree = Isolation.DEGREE_THREE;
+ lockingInfo.ReadModifyWrite = true;
+ cursor.MoveFirst(lockingInfo);
+ for (int i = 0; i < 5; i++)
+ Assert.IsTrue(cursor.MoveNext(lockingInfo));
+ cursor.Close();
+ cursorTxn.Commit();
+
+ // Close all.
+ secDB.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestMoveNextDuplicateWithLockingInfo()
+ {
+ testName = "TestMoveNextDuplicateWithLockingInfo";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Open environment, primary database and
+ * secondary database.
+ */
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDBInTxn(testHome, dbFileName,
+ dbSecFileName, out env, out db, out secDB);
+
+ // Write ten records into the database.
+ WriteRecordsInTxn(db, env);
+
+			/*
+			 * Create a cursor and move it to the duplicate record
+			 * (10, 5), which is record (5, 10) in the primary database.
+			 * Then move the cursor to the next duplicate record
+			 * (10, 6), which is record (6, 10) in the primary database.
+			 */
+ Transaction cursorTxn = env.BeginTransaction();
+ SecondaryCursor cursor =
+ secDB.SecondaryCursor(cursorTxn);
+ LockingInfo lockingInfo = new LockingInfo();
+ lockingInfo.IsolationDegree = Isolation.DEGREE_THREE;
+ lockingInfo.ReadModifyWrite = true;
+ cursor.Move(new DatabaseEntry(
+ BitConverter.GetBytes((int)10)), true, lockingInfo);
+ Assert.IsTrue(cursor.MoveNextDuplicate(lockingInfo));
+ Assert.AreEqual(BitConverter.GetBytes((int)10),
+ cursor.Current.Key.Data);
+ cursor.Close();
+ cursorTxn.Commit();
+
+ // Close all.
+ secDB.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestMoveNextUniqueWithLockingInfo()
+ {
+ testName = "TestMoveNextUniqueWithLockingInfo";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Open environment, primary database and
+ * secondary database.
+ */
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDBInTxn(testHome, dbFileName,
+ dbSecFileName, out env, out db, out secDB);
+
+ // Write ten records into the database.
+ WriteRecordsInTxn(db, env);
+
+			/*
+			 * Move the cursor to the duplicate record. Since the
+			 * duplicate record has the largest key, moving to the
+			 * next unique record should fail.
+			 */
+ Transaction cursorTxn = env.BeginTransaction();
+ SecondaryCursor cursor =
+ secDB.SecondaryCursor(cursorTxn);
+ LockingInfo lockingInfo = new LockingInfo();
+ lockingInfo.IsolationDegree = Isolation.DEGREE_THREE;
+ lockingInfo.ReadModifyWrite = true;
+ cursor.Move(new DatabaseEntry(
+ BitConverter.GetBytes((int)10)), true);
+ Assert.IsFalse(cursor.MoveNextUnique());
+ cursor.Close();
+ cursorTxn.Commit();
+
+ // Close all.
+ secDB.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestMovePrevWithLockingInfo()
+ {
+ testName = "TestMovePrevWithLockingInfo";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Open environment, primary database and
+ * secondary database.
+ */
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDBInTxn(testHome, dbFileName,
+ dbSecFileName, out env, out db, out secDB);
+
+ // Write ten records into the database.
+ WriteRecordsInTxn(db, env);
+
+			/*
+			 * Move the cursor to the last record, then move to the
+			 * previous record five times.
+			 */
+ Transaction cursorTxn = env.BeginTransaction();
+ SecondaryCursor cursor =
+ secDB.SecondaryCursor(cursorTxn);
+ LockingInfo lockingInfo = new LockingInfo();
+ lockingInfo.IsolationDegree = Isolation.DEGREE_TWO;
+ lockingInfo.ReadModifyWrite = true;
+ cursor.MoveLast(lockingInfo);
+ for (int i = 0; i < 5; i++)
+ Assert.IsTrue(cursor.MovePrev(lockingInfo));
+ cursor.Close();
+ cursorTxn.Commit();
+
+ // Close all.
+ secDB.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestMovePrevDuplicateWithLockingInfo()
+ {
+ testName = "TestMovePrevDuplicateWithLockingInfo";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Open environment, primary database and
+ * secondary database.
+ */
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDBInTxn(testHome, dbFileName,
+ dbSecFileName, out env, out db, out secDB);
+
+ // Write ten records into the database.
+ WriteRecordsInTxn(db, env);
+
+ Transaction cursorTxn = env.BeginTransaction();
+ SecondaryCursor cursor =
+ secDB.SecondaryCursor(cursorTxn);
+ LockingInfo lockingInfo = new LockingInfo();
+ lockingInfo.IsolationDegree = Isolation.DEGREE_TWO;
+ lockingInfo.ReadModifyWrite = true;
+
+			/*
+			 * Move the cursor to the record (10, 6), which is record
+			 * (6, 10) in the primary database, then move to its
+			 * previous duplicate record, (10, 5).
+			 */
+ KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>> pair;
+ DatabaseEntry pKey, pData;
+ pKey = new DatabaseEntry(BitConverter.GetBytes((int)6));
+ pData = new DatabaseEntry(BitConverter.GetBytes((int)10));
+ pair = new KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>>(pData,
+ new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ pKey, pData));
+ cursor.Move(pair, true, lockingInfo);
+ Assert.IsTrue(cursor.MovePrevDuplicate(lockingInfo));
+ Assert.AreEqual(BitConverter.GetBytes((int)10),
+ cursor.Current.Key.Data);
+
+ cursor.Close();
+ cursorTxn.Commit();
+
+ // Close all.
+ secDB.Close();
+ db.Close();
+ env.Close();
+ }
+
+
+ [Test]
+ public void TestMovePrevUniqueWithLockingInfo()
+ {
+ testName = "TestMovePrevUniqueWithLockingInfo";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Open environment, primary database and
+ * secondary database.
+ */
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDBInTxn(testHome, dbFileName,
+ dbSecFileName, out env, out db, out secDB);
+
+ // Write ten records into the database.
+ WriteRecordsInTxn(db, env);
+
+ Transaction cursorTxn = env.BeginTransaction();
+ SecondaryCursor cursor =
+ secDB.SecondaryCursor(cursorTxn);
+ LockingInfo lockingInfo = new LockingInfo();
+ lockingInfo.IsolationDegree = Isolation.DEGREE_TWO;
+ lockingInfo.ReadModifyWrite = true;
+
+			/*
+			 * Move the cursor to the record (10, 6), then move to the
+			 * previous unique record, which has a different key from
+			 * record (10, 6).
+			 */
+ KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>> pair;
+ DatabaseEntry pKey, pData;
+ pKey = new DatabaseEntry(BitConverter.GetBytes((int)6));
+ pData = new DatabaseEntry(BitConverter.GetBytes((int)10));
+ pair = new KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>>(pData,
+ new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ pKey, pData));
+ cursor.Move(pair, true, lockingInfo);
+ Assert.IsTrue(cursor.MovePrevUnique(lockingInfo));
+ Assert.AreNotEqual(BitConverter.GetBytes((int)10),
+ cursor.Current.Key.Data);
+
+ cursor.Close();
+ cursorTxn.Commit();
+
+ // Close all.
+ secDB.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestRefreshWithLockingInfo()
+ {
+ testName = "TestRefreshWithLockingInfo";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ /*
+ * Open environment, primary database and
+ * secondary database.
+ */
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ SecondaryBTreeDatabase secDB;
+ OpenSecDBInTxn(testHome, dbFileName,
+ dbSecFileName, out env, out db, out secDB);
+
+ // Write ten records into the database.
+ WriteRecordsInTxn(db, env);
+
+ Transaction cursorTxn = env.BeginTransaction();
+ SecondaryCursor cursor =
+ secDB.SecondaryCursor(cursorTxn);
+ LockingInfo lockingInfo = new LockingInfo();
+ lockingInfo.IsolationDegree = Isolation.DEGREE_TWO;
+ lockingInfo.ReadModifyWrite = true;
+
+			// Move the cursor to a record and refresh it.
+ KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>> pair;
+ DatabaseEntry pKey, pData;
+ pKey = new DatabaseEntry(BitConverter.GetBytes((int)6));
+ pData = new DatabaseEntry(BitConverter.GetBytes((int)10));
+ pair = new KeyValuePair<DatabaseEntry, KeyValuePair<
+ DatabaseEntry, DatabaseEntry>>(pData,
+ new KeyValuePair<DatabaseEntry, DatabaseEntry>(
+ pKey, pData));
+ cursor.Move(pair, true, lockingInfo);
+ Assert.IsTrue(cursor.Refresh(lockingInfo));
+ Assert.AreEqual(pData.Data, cursor.Current.Key.Data);
+ Assert.AreEqual(pKey.Data, cursor.Current.Value.Key.Data);
+
+ cursor.Close();
+ cursorTxn.Commit();
+
+ // Close all.
+ secDB.Close();
+ db.Close();
+ env.Close();
+ }
+
+ public void OpenSecDBInTxn(string home, string dbFileName,
+ string dbSecFileName, out DatabaseEnvironment env,
+ out BTreeDatabase db, out SecondaryBTreeDatabase secDB)
+ {
+ // Open environment.
+ DatabaseEnvironmentConfig envCfg =
+ new DatabaseEnvironmentConfig();
+ envCfg.Create = true;
+ envCfg.UseLocking = true;
+ envCfg.UseLogging = true;
+ envCfg.UseMPool = true;
+ envCfg.UseTxns = true;
+ env = DatabaseEnvironment.Open(
+ home, envCfg);
+
+ // Open primary and secondary database in a transaction.
+ Transaction openTxn = env.BeginTransaction();
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ dbConfig.PageSize = 4096;
+ dbConfig.Duplicates = DuplicatesPolicy.NONE;
+ dbConfig.ReadUncommitted = true;
+ db = BTreeDatabase.Open(dbFileName, dbConfig,
+ openTxn);
+ openTxn.Commit();
+
+ openTxn = env.BeginTransaction();
+ SecondaryBTreeDatabaseConfig secConfig =
+ new SecondaryBTreeDatabaseConfig(db,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ secConfig.Creation = CreatePolicy.IF_NEEDED;
+ secConfig.Duplicates = DuplicatesPolicy.SORTED;
+ secConfig.Env = env;
+ secConfig.ReadUncommitted = true;
+ secDB = SecondaryBTreeDatabase.Open(dbSecFileName,
+ secConfig, openTxn);
+ openTxn.Commit();
+ }
+
+ public void WriteRecords(BTreeDatabase db)
+ {
+			/*
+			 * Write ten records into the database. The 1st to 5th and
+			 * the 8th to 10th records have unique data, while the 6th
+			 * and 7th records (keys 5 and 6) share the same data.
+			 */
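+			/*
+			 * Because SecondaryKeyGen uses the data as the secondary
+			 * key, keys 5 and 6 become duplicates (both 10) in the
+			 * secondary database.
+			 */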
+ for (int i = 0; i < 10; i++)
+ {
+ if (i == 5 || i == 6)
+ db.Put(new DatabaseEntry(
+ BitConverter.GetBytes(i)),
+ new DatabaseEntry(
+ BitConverter.GetBytes((int)10)));
+ else
+ db.Put(new DatabaseEntry(
+ BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)));
+ }
+ }
+
+ public void WriteRecordsInTxn(BTreeDatabase db,
+ DatabaseEnvironment env)
+ {
+ Transaction txn = env.BeginTransaction();
+			/*
+			 * Write ten records into the database. The 1st to 5th and
+			 * the 8th to 10th records have unique data, while the 6th
+			 * and 7th records (keys 5 and 6) share the same data.
+			 */
+ for (int i = 0; i < 10; i++)
+ {
+ if (i == 5 || i == 6)
+ db.Put(new DatabaseEntry(
+ BitConverter.GetBytes(i)),
+ new DatabaseEntry(
+ BitConverter.GetBytes((int)10)), txn);
+ else
+ db.Put(new DatabaseEntry(
+ BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)), txn);
+ }
+
+ txn.Commit();
+ }
+
+
+ public void OpenSecDB(string dbFileName,
+ string dbSecFileName, out BTreeDatabase db,
+ out SecondaryBTreeDatabase secDB)
+ {
+ // Open a primary database.
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ db = BTreeDatabase.Open(dbFileName, dbConfig);
+
+ // Open a secondary database.
+ SecondaryBTreeDatabaseConfig secConfig =
+ new SecondaryBTreeDatabaseConfig(db,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ secConfig.Creation = CreatePolicy.IF_NEEDED;
+ secConfig.Duplicates = DuplicatesPolicy.SORTED;
+ secDB = SecondaryBTreeDatabase.Open(dbSecFileName,
+ secConfig);
+ }
+
+
+ public DatabaseEntry SecondaryKeyGen(
+ DatabaseEntry key, DatabaseEntry data)
+ {
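+			// Use the primary record's data as the secondary key.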
+ DatabaseEntry dbtGen;
+ dbtGen = new DatabaseEntry(data.Data);
+ return dbtGen;
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/SecondaryDatabaseConfigTest.cs b/db-4.8.30/test/scr037/SecondaryDatabaseConfigTest.cs
new file mode 100644
index 0000000..eef4abf
--- /dev/null
+++ b/db-4.8.30/test/scr037/SecondaryDatabaseConfigTest.cs
@@ -0,0 +1,83 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class SecondaryDatabaseConfigTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "SecondaryDatabaseConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ virtual public void TestConfig()
+ {
+ testName = "TestConfig";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+
+
+ // Open a primary btree database.
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ dbFileName, btreeDBConfig);
+
+ SecondaryDatabaseConfig secDBConfig =
+ new SecondaryDatabaseConfig(btreeDB, null);
+ Config(xmlElem, ref secDBConfig, true);
+ Confirm(xmlElem, secDBConfig, true);
+ btreeDB.Close();
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref SecondaryDatabaseConfig secDBConfig,
+ bool compulsory)
+ {
+ Configuration.ConfigBool(xmlElement, "ImmutableKey",
+ ref secDBConfig.ImmutableKey, compulsory);
+ Configuration.ConfigBool(xmlElement, "Populate",
+ ref secDBConfig.Populate, compulsory);
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ SecondaryDatabaseConfig secDBConfig,
+ bool compulsory)
+ {
+ Configuration.ConfirmBool(xmlElement, "ImmutableKey",
+ secDBConfig.ImmutableKey, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Populate",
+ secDBConfig.Populate, compulsory);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/SecondaryDatabaseTest.cs b/db-4.8.30/test/scr037/SecondaryDatabaseTest.cs
new file mode 100644
index 0000000..40f54fe
--- /dev/null
+++ b/db-4.8.30/test/scr037/SecondaryDatabaseTest.cs
@@ -0,0 +1,518 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class SecondaryDatabaseTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "SecondaryDatabaseTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestKeyGen()
+ {
+ testName = "TestKeyGen";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open primary database.
+ BTreeDatabaseConfig primaryDBConfig =
+ new BTreeDatabaseConfig();
+ primaryDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase primaryDB =
+ BTreeDatabase.Open(dbFileName, primaryDBConfig);
+
+ // Open secondary database.
+ SecondaryBTreeDatabaseConfig secDBConfig =
+ new SecondaryBTreeDatabaseConfig(primaryDB,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ SecondaryBTreeDatabase secDB =
+ SecondaryBTreeDatabase.Open(dbFileName,
+ secDBConfig);
+
+ primaryDB.Put(new DatabaseEntry(
+ BitConverter.GetBytes((int)1)),
+ new DatabaseEntry(BitConverter.GetBytes((int)11)));
+
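+			// The generated secondary key is the primary data (11), so the
+			// record can be looked up through the secondary database.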
+ KeyValuePair<DatabaseEntry, DatabaseEntry> pair;
+ pair = secDB.Get(new DatabaseEntry(
+ BitConverter.GetBytes((int)11)));
+ Assert.IsNotNull(pair.Value);
+
+ // Close secondary database.
+ secDB.Close();
+
+ // Close primary database.
+ primaryDB.Close();
+ }
+
+ [Test]
+ public void TestSecondaryCursor()
+ {
+ testName = "TestSecondaryCursor";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open primary database.
+ BTreeDatabaseConfig primaryDBConfig =
+ new BTreeDatabaseConfig();
+ primaryDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase primaryDB =
+ BTreeDatabase.Open(dbFileName, primaryDBConfig);
+
+ // Open secondary database.
+ SecondaryBTreeDatabaseConfig secDBConfig =
+ new SecondaryBTreeDatabaseConfig(primaryDB,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ SecondaryBTreeDatabase secDB =
+ SecondaryBTreeDatabase.Open(dbFileName,
+ secDBConfig);
+
+ primaryDB.Put(new DatabaseEntry(
+ BitConverter.GetBytes((int)1)),
+ new DatabaseEntry(BitConverter.GetBytes((int)11)));
+
+
+ SecondaryCursor cursor = secDB.SecondaryCursor();
+ cursor.Move(new DatabaseEntry(
+ BitConverter.GetBytes((int)11)), true);
+ Assert.AreEqual(BitConverter.GetBytes((int)11),
+ cursor.Current.Key.Data);
+
+ // Close the cursor.
+ cursor.Close();
+
+ // Close secondary database.
+ secDB.Close();
+
+ // Close primary database.
+ primaryDB.Close();
+ }
+
+ [Test]
+ public void TestSecondaryCursorWithConfig()
+ {
+ testName = "TestSecondaryCursorWithConfig";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabase db;
+ SecondaryBTreeDatabase secDB;
+ OpenPrimaryAndSecondaryDB(dbFileName, out db, out secDB);
+
+ for (int i = 0; i < 10; i++)
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes((int)i)));
+
+ CursorConfig cursorConfig = new CursorConfig();
+ cursorConfig.WriteCursor = false;
+ SecondaryCursor cursor =
+ secDB.SecondaryCursor(cursorConfig);
+
+ cursor.Move(new DatabaseEntry(
+ BitConverter.GetBytes((int)5)), true);
+
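+			// Count() returns the number of duplicates for the current
+			// secondary key; key 5 occurs exactly once.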
+ Assert.AreEqual(1, cursor.Count());
+
+ // Close the cursor.
+ cursor.Close();
+
+ // Close secondary database.
+ secDB.Close();
+
+ // Close primary database.
+ db.Close();
+ }
+
+ [Test]
+ public void TestSecondaryCursorWithTxn()
+ {
+ testName = "TestSecondaryCursorWithTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+			GetSecondaryCursorWithTxn(testHome, testName, false);
+ }
+
+ [Test]
+ public void TestSecondaryCursorWithConfigAndTxn()
+ {
+ testName = "TestSecondaryCursorWithConfigAndTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+			GetSecondaryCursorWithTxn(testHome, testName, true);
+ }
+
+		public void GetSecondaryCursorWithTxn(string home,
+ string name, bool ifCfg)
+ {
+ string dbFileName = name + ".db";
+ SecondaryCursor cursor;
+
+ // Open env.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(home,
+ envConfig);
+
+
+ // Open primary/secondary database.
+ Transaction txn = env.BeginTransaction();
+ BTreeDatabaseConfig dbConfig = new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ BTreeDatabase db = BTreeDatabase.Open(dbFileName,
+ dbConfig, txn);
+
+ SecondaryBTreeDatabaseConfig secDBConfig = new
+ SecondaryBTreeDatabaseConfig(db,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ secDBConfig.Env = env;
+ SecondaryBTreeDatabase secDB =
+ SecondaryBTreeDatabase.Open(dbFileName,
+ secDBConfig, txn);
+
+ for (int i = 0; i < 10; i++)
+ db.Put(new DatabaseEntry(BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes((int)i)), txn);
+
+
+			// Create a secondary cursor.
+ if (ifCfg == false)
+ secDB.SecondaryCursor(txn);
+ else if (ifCfg == true)
+ {
+ CursorConfig cursorConfig = new CursorConfig();
+ cursorConfig.WriteCursor = false;
+ cursor = secDB.SecondaryCursor(cursorConfig, txn);
+ cursor.Close();
+ }
+
+ secDB.Close();
+ db.Close();
+ txn.Commit();
+ env.Close();
+ }
+
+ [Test]
+ public void TestOpen()
+ {
+ testName = "TestOpen";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" +
+ testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ OpenSecQueueDB(dbFileName, dbSecFileName, false);
+ }
+
+ [Test]
+ public void TestOpenWithDBName()
+ {
+ testName = "TestOpenWithDBName";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" +
+ testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+ OpenSecQueueDB(dbFileName, dbSecFileName, true);
+ }
+
+ public void OpenSecQueueDB(string dbFileName,
+ string dbSecFileName, bool ifDBName)
+ {
+ // Open a primary btree database.
+ BTreeDatabaseConfig primaryDBConfig =
+ new BTreeDatabaseConfig();
+ primaryDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase primaryDB;
+
+			/*
+			 * If a secondary database name is given, the primary
+			 * database is also opened with a database name.
+			 */
+ if (ifDBName == false)
+ primaryDB = BTreeDatabase.Open(dbFileName,
+ primaryDBConfig);
+ else
+ primaryDB = BTreeDatabase.Open(dbFileName,
+ "primary", primaryDBConfig);
+
+ try
+ {
+ // Open a new secondary database.
+ SecondaryBTreeDatabaseConfig secBTDBConfig =
+ new SecondaryBTreeDatabaseConfig(
+ primaryDB, null);
+ secBTDBConfig.Creation =
+ CreatePolicy.IF_NEEDED;
+
+ SecondaryBTreeDatabase secBTDB;
+ if (ifDBName == false)
+ secBTDB = SecondaryBTreeDatabase.Open(
+ dbSecFileName, secBTDBConfig);
+ else
+ secBTDB = SecondaryBTreeDatabase.Open(
+ dbSecFileName, "secondary",
+ secBTDBConfig);
+
+ // Close the secondary database.
+ secBTDB.Close();
+
+ // Open the existing secondary database.
+ SecondaryDatabaseConfig secDBConfig =
+ new SecondaryDatabaseConfig(
+ primaryDB, null);
+
+ SecondaryDatabase secDB;
+ if (ifDBName == false)
+ secDB = SecondaryBTreeDatabase.Open(
+ dbSecFileName, secDBConfig);
+ else
+ secDB = SecondaryBTreeDatabase.Open(
+ dbSecFileName, "secondary", secDBConfig);
+
+ // Close secondary database.
+ secDB.Close();
+ }
+ catch (DatabaseException)
+ {
+ throw new TestException();
+ }
+ finally
+ {
+ // Close primary database.
+ primaryDB.Close();
+ }
+ }
+
+ [Test]
+ public void TestOpenWithinTxn()
+ {
+ testName = "TestOpenWithinTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ OpenSecQueueDBWithinTxn(testHome, dbFileName,
+ dbSecFileName, false);
+ }
+
+ [Test]
+ public void TestOpenDBNameWithinTxn()
+ {
+ testName = "TestOpenDBNameWithinTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ OpenSecQueueDBWithinTxn(testHome, dbFileName,
+ dbSecFileName, true);
+ }
+
+ public void OpenSecQueueDBWithinTxn(string home,
+ string dbFileName, string dbSecFileName, bool ifDbName)
+ {
+ // Open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+ envConfig.UseLogging = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, envConfig);
+
+ // Open a primary btree database.
+ Transaction openDBTxn = env.BeginTransaction();
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ BTreeDatabase db = BTreeDatabase.Open(
+ dbFileName, dbConfig, openDBTxn);
+ openDBTxn.Commit();
+
+ // Open a secondary btree database.
+ Transaction openSecTxn = env.BeginTransaction();
+ SecondaryBTreeDatabaseConfig secDBConfig =
+ new SecondaryBTreeDatabaseConfig(db,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ secDBConfig.Env = env;
+ secDBConfig.Creation = CreatePolicy.IF_NEEDED;
+
+ SecondaryBTreeDatabase secDB;
+ if (ifDbName == false)
+ secDB = SecondaryBTreeDatabase.Open(
+ dbSecFileName, secDBConfig, openSecTxn);
+ else
+ secDB = SecondaryBTreeDatabase.Open(
+ dbSecFileName, "secondary", secDBConfig,
+ openSecTxn);
+ openSecTxn.Commit();
+ secDB.Close();
+
+ // Open the existing secondary database.
+ Transaction secTxn = env.BeginTransaction();
+ SecondaryDatabaseConfig secConfig =
+ new SecondaryDatabaseConfig(db,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ secConfig.Env = env;
+
+ SecondaryDatabase secExDB;
+ if (ifDbName == false)
+ secExDB = SecondaryBTreeDatabase.Open(
+ dbSecFileName, secConfig, secTxn);
+ else
+ secExDB = SecondaryBTreeDatabase.Open(
+ dbSecFileName, "secondary", secConfig,
+ secTxn);
+ secExDB.Close();
+ secTxn.Commit();
+
+ db.Close();
+ env.Close();
+ }
+
+ public void OpenPrimaryAndSecondaryDB(string dbFileName,
+ out BTreeDatabase primaryDB,
+ out SecondaryBTreeDatabase secDB)
+ {
+ // Open primary database.
+ BTreeDatabaseConfig primaryDBConfig =
+ new BTreeDatabaseConfig();
+ primaryDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ primaryDB =
+ BTreeDatabase.Open(dbFileName, primaryDBConfig);
+
+ // Open secondary database.
+ SecondaryBTreeDatabaseConfig secDBConfig =
+ new SecondaryBTreeDatabaseConfig(primaryDB,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ secDB = SecondaryBTreeDatabase.Open(dbFileName,
+ secDBConfig);
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestBadSecondaryException()
+ {
+ testName = "TestBadSecondaryException";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string secDBFileName = testHome + "/" +
+ testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open primary database.
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase btreeDB =
+ BTreeDatabase.Open(dbFileName, btreeDBConfig);
+
+ // Open secondary database.
+ SecondaryBTreeDatabaseConfig secBtDbConfig =
+ new SecondaryBTreeDatabaseConfig(btreeDB,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ secBtDbConfig.Creation = CreatePolicy.IF_NEEDED;
+ SecondaryBTreeDatabase secBtDb =
+ SecondaryBTreeDatabase.Open(secDBFileName,
+ secBtDbConfig);
+
+ // Put some data into the primary database.
+ for (int i = 0; i < 10; i++)
+ btreeDB.Put(new DatabaseEntry(
+ BitConverter.GetBytes(i)),
+ new DatabaseEntry(BitConverter.GetBytes(i)));
+
+ // Close the secondary database.
+ secBtDb.Close();
+
+ // Delete record(5, 5) in primary database.
+ btreeDB.Delete(new DatabaseEntry(
+ BitConverter.GetBytes((int)5)));
+
+ // Reopen the secondary database.
+ SecondaryDatabase secDB = SecondaryDatabase.Open(
+ secDBFileName,
+ new SecondaryDatabaseConfig(btreeDB,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen)));
+
+ /*
+ * Getting record(5, 5) through the secondary database
+ * should throw BadSecondaryException since the record
+ * has been deleted in the primary database.
+ */
+ try
+ {
+ secDB.Exists(new DatabaseEntry(
+ BitConverter.GetBytes((int)5)));
+ }
+ catch (BadSecondaryException)
+ {
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ secDB.Close();
+ btreeDB.Close();
+ }
+ }
+
+ public DatabaseEntry SecondaryKeyGen(
+ DatabaseEntry key, DatabaseEntry data)
+ {
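+ // Use the record's data as the secondary key.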
+ DatabaseEntry dbtGen;
+ dbtGen = new DatabaseEntry(data.Data);
+ return dbtGen;
+ }
+
+ }
+
+} \ No newline at end of file
diff --git a/db-4.8.30/test/scr037/SecondaryHashDatabaseConfigTest.cs b/db-4.8.30/test/scr037/SecondaryHashDatabaseConfigTest.cs
new file mode 100644
index 0000000..7125a94
--- /dev/null
+++ b/db-4.8.30/test/scr037/SecondaryHashDatabaseConfigTest.cs
@@ -0,0 +1,109 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class SecondaryHashDatabaseConfigTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "SecondaryHashDatabaseConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestConfig()
+ {
+ testName = "TestConfig";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ // Open a primary hash database.
+ HashDatabaseConfig hashDBConfig =
+ new HashDatabaseConfig();
+ hashDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ HashDatabase hashDB = HashDatabase.Open(
+ dbFileName, hashDBConfig);
+
+ SecondaryHashDatabaseConfig secDBConfig =
+ new SecondaryHashDatabaseConfig(hashDB, null);
+
+ Config(xmlElem, ref secDBConfig, true);
+ Confirm(xmlElem, secDBConfig, true);
+
+ // Close the primary hash database.
+ hashDB.Close();
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ SecondaryHashDatabaseConfig secHashDBConfig,
+ bool compulsory)
+ {
+ SecondaryDatabaseConfig secDBConfig =
+ secHashDBConfig;
+ SecondaryDatabaseConfigTest.Confirm(xmlElement,
+ secDBConfig, compulsory);
+
+ // Confirm secondary hash database specific configuration.
+ Configuration.ConfirmCreatePolicy(xmlElement,
+ "Creation", secHashDBConfig.Creation, compulsory);
+ Configuration.ConfirmDuplicatesPolicy(xmlElement,
+ "Duplicates", secHashDBConfig.Duplicates, compulsory);
+ Configuration.ConfirmUint(xmlElement, "FillFactor",
+ secHashDBConfig.FillFactor, compulsory);
+ Configuration.ConfirmUint(xmlElement,
+ "NumElements",
+ secHashDBConfig.TableSize, compulsory);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref SecondaryHashDatabaseConfig secHashDBConfig,
+ bool compulsory)
+ {
+ uint fillFactor = new uint();
+ uint numElements = new uint();
+
+ SecondaryDatabaseConfig secDBConfig = secHashDBConfig;
+ SecondaryDatabaseConfigTest.Config(xmlElement,
+ ref secDBConfig, compulsory);
+
+ // Configure specific fields/properties of hash db
+ Configuration.ConfigCreatePolicy(xmlElement,
+ "Creation", ref secHashDBConfig.Creation, compulsory);
+ Configuration.ConfigDuplicatesPolicy(xmlElement,
+ "Duplicates", ref secHashDBConfig.Duplicates, compulsory);
+ if (Configuration.ConfigUint(xmlElement, "FillFactor",
+ ref fillFactor, compulsory))
+ secHashDBConfig.FillFactor = fillFactor;
+ if (Configuration.ConfigUint(xmlElement, "NumElements",
+ ref numElements, compulsory))
+ secHashDBConfig.TableSize = numElements;
+ }
+ }
+}
+
diff --git a/db-4.8.30/test/scr037/SecondaryHashDatabaseTest.cs b/db-4.8.30/test/scr037/SecondaryHashDatabaseTest.cs
new file mode 100644
index 0000000..839e8c9
--- /dev/null
+++ b/db-4.8.30/test/scr037/SecondaryHashDatabaseTest.cs
@@ -0,0 +1,403 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class SecondaryHashDatabaseTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "SecondaryHashDatabaseTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestHashFunction()
+ {
+ testName = "TestHashFunction";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" +
+ testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a primary hash database.
+ HashDatabaseConfig dbConfig =
+ new HashDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ HashDatabase hashDB = HashDatabase.Open(
+ dbFileName, dbConfig);
+
+ /*
+ * Define a hash function and open a secondary
+ * hash database.
+ */
+ SecondaryHashDatabaseConfig secDBConfig =
+ new SecondaryHashDatabaseConfig(hashDB, null);
+ secDBConfig.HashFunction =
+ new HashFunctionDelegate(HashFunction);
+ secDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ SecondaryHashDatabase secDB =
+ SecondaryHashDatabase.Open(dbSecFileName,
+ secDBConfig);
+
+ /*
+ * Confirm the hash function defined in the configuration.
+ * Call the function through the secondary database; if it
+ * returns the expected value, the hash function was
+ * configured successfully.
+ */
+ uint data = secDB.HashFunction(BitConverter.GetBytes(1));
+ Assert.AreEqual(0, data);
+
+ // Close all.
+ secDB.Close();
+ hashDB.Close();
+ }
+
+ public uint HashFunction(byte[] data)
+ {
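+ // Zero the first byte so that, on a little-endian machine,
+ // hashing the bytes of (int)1 yields 0, as asserted in TestHashFunction.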
+ data[0] = 0;
+ return BitConverter.ToUInt32(data, 0);
+ }
+
+ [Test]
+ public void TestCompare()
+ {
+ testName = "TestCompare";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName +
+ "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a primary hash database.
+ HashDatabaseConfig dbConfig =
+ new HashDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.ALWAYS;
+ HashDatabase db = HashDatabase.Open(
+ dbFileName, dbConfig);
+
+ // Open a secondary hash database.
+ SecondaryHashDatabaseConfig secConfig =
+ new SecondaryHashDatabaseConfig(null, null);
+ secConfig.Creation = CreatePolicy.IF_NEEDED;
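+ // The primary database can also be supplied after construction.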
+ secConfig.Primary = db;
+ secConfig.Compare =
+ new EntryComparisonDelegate(SecondaryEntryComparison);
+ SecondaryHashDatabase secDB =
+ SecondaryHashDatabase.Open(dbSecFileName, secConfig);
+
+ /*
+ * Run the compare function set in the configuration
+ * through the secondary database and verify the result.
+ */
+ DatabaseEntry dbt1, dbt2;
+ dbt1 = new DatabaseEntry(
+ BitConverter.GetBytes((int)257));
+ dbt2 = new DatabaseEntry(
+ BitConverter.GetBytes((int)255));
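+ // 257 compares greater than 255, so the comparison result is positive.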
+ Assert.Less(0, secDB.Compare(dbt1, dbt2));
+
+ secDB.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestDuplicates()
+ {
+ testName = "TestDuplicates";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" + testName
+ + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a primary hash database.
+ HashDatabaseConfig dbConfig =
+ new HashDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.ALWAYS;
+ HashDatabase db = HashDatabase.Open(
+ dbFileName, dbConfig);
+
+ // Open a secondary hash database.
+ SecondaryHashDatabaseConfig secConfig =
+ new SecondaryHashDatabaseConfig(null, null);
+ secConfig.Primary = db;
+ secConfig.Duplicates = DuplicatesPolicy.SORTED;
+ secConfig.Creation = CreatePolicy.IF_NEEDED;
+ SecondaryHashDatabase secDB =
+ SecondaryHashDatabase.Open(
+ dbSecFileName, secConfig);
+
+ // Confirm the duplicates policy of the opened secondary database.
+ Assert.AreEqual(DuplicatesPolicy.SORTED,
+ secDB.Duplicates);
+
+ secDB.Close();
+ db.Close();
+ }
+
+
+ /*
+ * All Open() tests share the same configuration in
+ * AllTestData.xml.
+ */
+ [Test]
+ public void TestOpen()
+ {
+ testName = "TestOpen";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" +
+ testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ OpenSecHashDB(testFixtureName, "TestOpen",
+ dbFileName, dbSecFileName, false);
+ }
+
+ [Test]
+ public void TestOpenWithDBName()
+ {
+ testName = "TestOpenWithDBName";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" +
+ testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ OpenSecHashDB(testFixtureName, "TestOpen",
+ dbFileName, dbSecFileName, true);
+ }
+
+ public void OpenSecHashDB(string className,
+ string funName, string dbFileName, string dbSecFileName,
+ bool ifDBName)
+ {
+ XmlElement xmlElem = Configuration.TestSetUp(
+ className, funName);
+
+ // Open a primary hash database.
+ HashDatabaseConfig primaryDBConfig =
+ new HashDatabaseConfig();
+ primaryDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ HashDatabase primaryDB;
+
+ /*
+ * If a secondary database name is given, the primary
+ * database is also opened with a database name.
+ */
+ if (ifDBName == false)
+ primaryDB = HashDatabase.Open(dbFileName,
+ primaryDBConfig);
+ else
+ primaryDB = HashDatabase.Open(dbFileName,
+ "primary", primaryDBConfig);
+
+ try
+ {
+ // Open a new secondary database.
+ SecondaryHashDatabaseConfig secHashDBConfig =
+ new SecondaryHashDatabaseConfig(
+ primaryDB, null);
+ SecondaryHashDatabaseConfigTest.Config(
+ xmlElem, ref secHashDBConfig, false);
+ secHashDBConfig.Creation =
+ CreatePolicy.IF_NEEDED;
+
+ SecondaryHashDatabase secHashDB;
+ if (ifDBName == false)
+ secHashDB = SecondaryHashDatabase.Open(
+ dbSecFileName, secHashDBConfig);
+ else
+ secHashDB = SecondaryHashDatabase.Open(
+ dbSecFileName, "secondary",
+ secHashDBConfig);
+
+ // Close the secondary database.
+ secHashDB.Close();
+
+ // Open the existing secondary database.
+ SecondaryDatabaseConfig secDBConfig =
+ new SecondaryDatabaseConfig(
+ primaryDB, null);
+
+ SecondaryDatabase secDB;
+ if (ifDBName == false)
+ secDB = SecondaryHashDatabase.Open(
+ dbSecFileName, secDBConfig);
+ else
+ secDB = SecondaryHashDatabase.Open(
+ dbSecFileName, "secondary", secDBConfig);
+
+ // Close secondary database.
+ secDB.Close();
+ }
+ catch (DatabaseException)
+ {
+ throw new TestException();
+ }
+ finally
+ {
+ // Close primary database.
+ primaryDB.Close();
+ }
+ }
+
+ [Test]
+ public void TestOpenWithinTxn()
+ {
+ testName = "TestOpenWithinTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ OpenSecHashDBWithinTxn(testFixtureName,
+ "TestOpen", testHome, dbFileName,
+ dbSecFileName, false);
+ }
+
+ [Test]
+ public void TestOpenDBNameWithinTxn()
+ {
+ testName = "TestOpenDBNameWithinTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ OpenSecHashDBWithinTxn(testFixtureName,
+ "TestOpen", testHome, dbFileName,
+ dbSecFileName, true);
+ }
+
+ public void OpenSecHashDBWithinTxn(string className,
+ string funName, string home, string dbFileName,
+ string dbSecFileName, bool ifDbName)
+ {
+ XmlElement xmlElem = Configuration.TestSetUp(
+ className, funName);
+
+ // Open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+ envConfig.UseLogging = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, envConfig);
+
+ // Open a primary hash database.
+ Transaction openDBTxn = env.BeginTransaction();
+ HashDatabaseConfig dbConfig =
+ new HashDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ HashDatabase db = HashDatabase.Open(
+ dbFileName, dbConfig, openDBTxn);
+ openDBTxn.Commit();
+
+ // Open a secondary hash database.
+ Transaction openSecTxn = env.BeginTransaction();
+ SecondaryHashDatabaseConfig secDBConfig =
+ new SecondaryHashDatabaseConfig(db,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ SecondaryHashDatabaseConfigTest.Config(xmlElem,
+ ref secDBConfig, false);
+ secDBConfig.HashFunction = null;
+ secDBConfig.Env = env;
+ SecondaryHashDatabase secDB;
+ if (ifDbName == false)
+ secDB = SecondaryHashDatabase.Open(
+ dbSecFileName, secDBConfig, openSecTxn);
+ else
+ secDB = SecondaryHashDatabase.Open(
+ dbSecFileName, "secondary", secDBConfig,
+ openSecTxn);
+ openSecTxn.Commit();
+
+ // Confirm the flags configured in secDBConfig.
+ Confirm(xmlElem, secDB, true);
+ secDB.Close();
+
+ // Open the existing secondary database.
+ Transaction secTxn = env.BeginTransaction();
+ SecondaryDatabaseConfig secConfig =
+ new SecondaryDatabaseConfig(db,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ secConfig.Env = env;
+
+ SecondaryDatabase secExDB;
+ if (ifDbName == false)
+ secExDB = SecondaryHashDatabase.Open(
+ dbSecFileName, secConfig, secTxn);
+ else
+ secExDB = SecondaryHashDatabase.Open(
+ dbSecFileName, "secondary", secConfig,
+ secTxn);
+ secExDB.Close();
+ secTxn.Commit();
+
+ db.Close();
+ env.Close();
+ }
+
+ public DatabaseEntry SecondaryKeyGen(
+ DatabaseEntry key, DatabaseEntry data)
+ {
+ DatabaseEntry dbtGen;
+ dbtGen = new DatabaseEntry(data.Data);
+ return dbtGen;
+ }
+
+ public int SecondaryEntryComparison(
+ DatabaseEntry dbt1, DatabaseEntry dbt2)
+ {
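+ // Compare the two entries as 32-bit integers.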
+ int a, b;
+ a = BitConverter.ToInt32(dbt1.Data, 0);
+ b = BitConverter.ToInt32(dbt2.Data, 0);
+ return a - b;
+ }
+
+ public static void Confirm(XmlElement xmlElem,
+ SecondaryHashDatabase secDB, bool compulsory)
+ {
+ Configuration.ConfirmUint(xmlElem, "FillFactor",
+ secDB.FillFactor, compulsory);
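+ // NumElements is approximated as the table size multiplied by the fill factor.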
+ Configuration.ConfirmUint(xmlElem, "NumElements",
+ secDB.TableSize * secDB.FillFactor, compulsory);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/SecondaryQueueDatabaseConfigTest.cs b/db-4.8.30/test/scr037/SecondaryQueueDatabaseConfigTest.cs
new file mode 100644
index 0000000..4807335
--- /dev/null
+++ b/db-4.8.30/test/scr037/SecondaryQueueDatabaseConfigTest.cs
@@ -0,0 +1,109 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class SecondaryQueueDatabaseConfigTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "SecondaryQueueDatabaseConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestConfig()
+ {
+ testName = "TestConfig";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+
+ // Open a primary queue database.
+ QueueDatabaseConfig queueDBConfig =
+ new QueueDatabaseConfig();
+ queueDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ QueueDatabase queueDB = QueueDatabase.Open(
+ dbFileName, queueDBConfig);
+
+ SecondaryQueueDatabaseConfig secDBConfig =
+ new SecondaryQueueDatabaseConfig(queueDB, null);
+
+ Config(xmlElem, ref secDBConfig, true);
+ Confirm(xmlElem, secDBConfig, true);
+
+ // Close the primary queue database.
+ queueDB.Close();
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ SecondaryQueueDatabaseConfig secQueueDBConfig,
+ bool compulsory)
+ {
+ SecondaryDatabaseConfig secDBConfig =
+ secQueueDBConfig;
+ SecondaryDatabaseConfigTest.Confirm(xmlElement,
+ secDBConfig, compulsory);
+
+ // Confirm secondary queue database specific configuration.
+ Configuration.ConfirmCreatePolicy(xmlElement,
+ "Creation", secQueueDBConfig.Creation, compulsory);
+ Configuration.ConfirmUint(xmlElement,
+ "ExtentSize", secQueueDBConfig.ExtentSize, compulsory);
+ Configuration.ConfirmUint(xmlElement, "Length",
+ secQueueDBConfig.Length, compulsory);
+ Configuration.ConfirmInt(xmlElement, "PadByte",
+ secQueueDBConfig.PadByte, compulsory);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref SecondaryQueueDatabaseConfig secQueueDBConfig,
+ bool compulsory)
+ {
+ uint uintValue = new uint();
+ int intValue = new int();
+ SecondaryDatabaseConfig secConfig = secQueueDBConfig;
+ SecondaryDatabaseConfigTest.Config(xmlElement,
+ ref secConfig, compulsory);
+
+ // Configure specific fields/properties of Queue database
+ Configuration.ConfigCreatePolicy(xmlElement, "Creation",
+ ref secQueueDBConfig.Creation, compulsory);
+ if (Configuration.ConfigUint(xmlElement, "Length",
+ ref uintValue, compulsory))
+ secQueueDBConfig.Length = uintValue;
+ if (Configuration.ConfigInt(xmlElement, "PadByte",
+ ref intValue, compulsory))
+ secQueueDBConfig.PadByte = intValue;
+ if (Configuration.ConfigUint(xmlElement, "ExtentSize",
+ ref uintValue, compulsory))
+ secQueueDBConfig.ExtentSize = uintValue;
+ }
+ }
+}
+
diff --git a/db-4.8.30/test/scr037/SecondaryQueueDatabaseTest.cs b/db-4.8.30/test/scr037/SecondaryQueueDatabaseTest.cs
new file mode 100644
index 0000000..7a6a0a3
--- /dev/null
+++ b/db-4.8.30/test/scr037/SecondaryQueueDatabaseTest.cs
@@ -0,0 +1,227 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class SecondaryQueueDatabaseTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "SecondaryQueueDatabaseTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ /*
+ * All Open() tests share the same configuration in
+ * AllTestData.xml.
+ */
+ [Test]
+ public void TestOpen()
+ {
+ testName = "TestOpen";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" +
+ testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ OpenSecQueueDB(testFixtureName, "TestOpen",
+ dbFileName, dbSecFileName);
+ }
+
+ public void OpenSecQueueDB(string className,
+ string funName, string dbFileName, string dbSecFileName)
+ {
+ XmlElement xmlElem = Configuration.TestSetUp(
+ className, funName);
+
+ // Open a primary queue database.
+ QueueDatabaseConfig primaryDBConfig =
+ new QueueDatabaseConfig();
+ primaryDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ QueueDatabase primaryDB;
+
+ /*
+ * Queue databases do not support named sub-databases,
+ * so the primary database is opened by file name only.
+ */
+ primaryDB = QueueDatabase.Open(dbFileName,
+ primaryDBConfig);
+
+ try
+ {
+ // Open a new secondary database.
+ SecondaryQueueDatabaseConfig secQueueDBConfig =
+ new SecondaryQueueDatabaseConfig(
+ primaryDB, null);
+ SecondaryQueueDatabaseConfigTest.Config(
+ xmlElem, ref secQueueDBConfig, false);
+ secQueueDBConfig.Creation =
+ CreatePolicy.IF_NEEDED;
+
+ SecondaryQueueDatabase secQueueDB;
+ secQueueDB = SecondaryQueueDatabase.Open(
+ dbSecFileName, secQueueDBConfig);
+
+ // Close the secondary database.
+ secQueueDB.Close();
+
+ // Open the existing secondary database.
+ SecondaryDatabaseConfig secDBConfig =
+ new SecondaryQueueDatabaseConfig(
+ primaryDB, null);
+
+ SecondaryDatabase secDB;
+ secDB = SecondaryQueueDatabase.Open(
+ dbSecFileName, secDBConfig);
+
+ // Close secondary database.
+ secDB.Close();
+ }
+ catch (DatabaseException)
+ {
+ throw new TestException();
+ }
+ finally
+ {
+ // Close primary database.
+ primaryDB.Close();
+ }
+ }
+
+ [Test]
+ public void TestOpenWithinTxn()
+ {
+ testName = "TestOpenWithinTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ OpenSecQueueDBWithinTxn(testFixtureName,
+ "TestOpen", testHome, dbFileName,
+ dbSecFileName);
+ }
+
+ [Test]
+ public void TestOpenDBNameWithinTxn()
+ {
+ testName = "TestOpenDBNameWithinTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ OpenSecQueueDBWithinTxn(testFixtureName,
+ "TestOpen", testHome, dbFileName,
+ dbSecFileName);
+ }
+
+ public void OpenSecQueueDBWithinTxn(string className,
+ string funName, string home, string dbFileName,
+ string dbSecFileName)
+ {
+ XmlElement xmlElem = Configuration.TestSetUp(
+ className, funName);
+
+ // Open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+ envConfig.UseLogging = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, envConfig);
+
+ // Open a primary queue database.
+ Transaction openDBTxn = env.BeginTransaction();
+ QueueDatabaseConfig dbConfig =
+ new QueueDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ QueueDatabase db = QueueDatabase.Open(
+ dbFileName, dbConfig, openDBTxn);
+ openDBTxn.Commit();
+
+ // Open a secondary queue database.
+ Transaction openSecTxn = env.BeginTransaction();
+ SecondaryQueueDatabaseConfig secDBConfig =
+ new SecondaryQueueDatabaseConfig(db,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ SecondaryQueueDatabaseConfigTest.Config(xmlElem,
+ ref secDBConfig, true);
+ secDBConfig.Env = env;
+ SecondaryQueueDatabase secDB;
+ secDB = SecondaryQueueDatabase.Open(
+ dbSecFileName, secDBConfig, openSecTxn);
+
+ openSecTxn.Commit();
+
+ // Confirm the flags configured in secDBConfig.
+ Confirm(xmlElem, secDB, true);
+ secDB.Close();
+
+ // Open the existing secondary database.
+ Transaction secTxn = env.BeginTransaction();
+ SecondaryDatabaseConfig secConfig =
+ new SecondaryDatabaseConfig(db,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ secConfig.Env = env;
+
+ SecondaryDatabase secExDB;
+ secExDB = SecondaryQueueDatabase.Open(
+ dbSecFileName, secConfig, secTxn);
+
+ secExDB.Close();
+ secTxn.Commit();
+
+ db.Close();
+ env.Close();
+ }
+
+ public DatabaseEntry SecondaryKeyGen(
+ DatabaseEntry key, DatabaseEntry data)
+ {
+ DatabaseEntry dbtGen;
+ dbtGen = new DatabaseEntry(data.Data);
+ return dbtGen;
+ }
+
+ public static void Confirm(XmlElement xmlElem,
+ SecondaryQueueDatabase secDB, bool compulsory)
+ {
+ Configuration.ConfirmUint(xmlElem,
+ "ExtentSize", secDB.ExtentSize, compulsory);
+ Configuration.ConfirmUint(xmlElem, "Length",
+ secDB.Length, compulsory);
+ Configuration.ConfirmInt(xmlElem, "PadByte",
+ secDB.PadByte, compulsory);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/SecondaryRecnoDatabaseConfigTest.cs b/db-4.8.30/test/scr037/SecondaryRecnoDatabaseConfigTest.cs
new file mode 100644
index 0000000..fe1159f
--- /dev/null
+++ b/db-4.8.30/test/scr037/SecondaryRecnoDatabaseConfigTest.cs
@@ -0,0 +1,119 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class SecondaryRecnoDatabaseConfigTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "SecondaryRecnoDatabaseConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestConfig()
+ {
+ testName = "TestConfig";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+
+ // Open a primary recno database.
+ RecnoDatabaseConfig recDBConfig =
+ new RecnoDatabaseConfig();
+ recDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ RecnoDatabase recDB = RecnoDatabase.Open(
+ dbFileName, recDBConfig);
+
+ SecondaryRecnoDatabaseConfig secDBConfig =
+ new SecondaryRecnoDatabaseConfig(recDB, null);
+
+ Config(xmlElem, ref secDBConfig, true);
+ Confirm(xmlElem, secDBConfig, true);
+
+ // Close the primary recno database.
+ recDB.Close();
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ SecondaryRecnoDatabaseConfig secRecDBConfig,
+ bool compulsory)
+ {
+ SecondaryDatabaseConfig secDBConfig =
+ secRecDBConfig;
+ SecondaryDatabaseConfigTest.Confirm(xmlElement,
+ secDBConfig, compulsory);
+
+ // Confirm secondary recno database specific configuration.
+ Configuration.ConfirmString(xmlElement, "BackingFile",
+ secRecDBConfig.BackingFile, compulsory);
+ Configuration.ConfirmCreatePolicy(xmlElement, "Creation",
+ secRecDBConfig.Creation, compulsory);
+ Configuration.ConfirmInt(xmlElement, "Delimiter",
+ secRecDBConfig.Delimiter, compulsory);
+ Configuration.ConfirmUint(xmlElement, "Length",
+ secRecDBConfig.Length, compulsory);
+ Configuration.ConfirmInt(xmlElement, "PadByte",
+ secRecDBConfig.PadByte, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Renumber",
+ secRecDBConfig.Renumber, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Snapshot",
+ secRecDBConfig.Snapshot, compulsory);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref SecondaryRecnoDatabaseConfig secRecDBConfig,
+ bool compulsory)
+ {
+ int intValue = new int();
+ uint uintValue = new uint();
+ SecondaryDatabaseConfig secDBConfig = secRecDBConfig;
+ SecondaryDatabaseConfigTest.Config(xmlElement,
+ ref secDBConfig, compulsory);
+
+ // Configure specific fields/properties of Recno database
+ Configuration.ConfigCreatePolicy(xmlElement, "Creation",
+ ref secRecDBConfig.Creation, compulsory);
+ if (Configuration.ConfigInt(xmlElement, "Delimiter",
+ ref intValue, compulsory))
+ secRecDBConfig.Delimiter = intValue;
+ if (Configuration.ConfigUint(xmlElement, "Length",
+ ref uintValue, compulsory))
+ secRecDBConfig.Length = uintValue;
+ if (Configuration.ConfigInt(xmlElement, "PadByte",
+ ref intValue, compulsory))
+ secRecDBConfig.PadByte = intValue;
+ Configuration.ConfigBool(xmlElement, "Renumber",
+ ref secRecDBConfig.Renumber, compulsory);
+ Configuration.ConfigBool(xmlElement, "Snapshot",
+ ref secRecDBConfig.Snapshot, compulsory);
+ }
+ }
+}
+
diff --git a/db-4.8.30/test/scr037/SecondaryRecnoDatabaseTest.cs b/db-4.8.30/test/scr037/SecondaryRecnoDatabaseTest.cs
new file mode 100644
index 0000000..ac22115
--- /dev/null
+++ b/db-4.8.30/test/scr037/SecondaryRecnoDatabaseTest.cs
@@ -0,0 +1,269 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class SecondaryRecnoDatabaseTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "SecondaryRecnoDatabaseTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ /*
+ * All Open() tests share the same configuration in
+ * AllTestData.xml.
+ */
+ [Test]
+ public void TestOpen()
+ {
+ testName = "TestOpen";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" +
+ testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ OpenSecRecnoDB(testFixtureName, "TestOpen",
+ dbFileName, dbSecFileName, false);
+ }
+
+ [Test]
+ public void TestOpenWithDBName()
+ {
+ testName = "TestOpenWithDBName";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ string dbSecFileName = testHome + "/" +
+ testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ OpenSecRecnoDB(testFixtureName, "TestOpen",
+ dbFileName, dbSecFileName, true);
+ }
+
+ public void OpenSecRecnoDB(string className,
+ string funName, string dbFileName, string dbSecFileName,
+ bool ifDBName)
+ {
+ XmlElement xmlElem = Configuration.TestSetUp(
+ className, funName);
+
+ // Open a primary recno database.
+ RecnoDatabaseConfig primaryDBConfig =
+ new RecnoDatabaseConfig();
+ primaryDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ RecnoDatabase primaryDB;
+
+ /*
+ * If a secondary database name is given, the primary
+ * database is also opened with a database name.
+ */
+ if (ifDBName == false)
+ primaryDB = RecnoDatabase.Open(dbFileName,
+ primaryDBConfig);
+ else
+ primaryDB = RecnoDatabase.Open(dbFileName,
+ "primary", primaryDBConfig);
+
+ try
+ {
+ // Open a new secondary database.
+ SecondaryRecnoDatabaseConfig secRecnoDBConfig =
+ new SecondaryRecnoDatabaseConfig(
+ primaryDB, null);
+ SecondaryRecnoDatabaseConfigTest.Config(
+ xmlElem, ref secRecnoDBConfig, false);
+ secRecnoDBConfig.Creation =
+ CreatePolicy.IF_NEEDED;
+
+ SecondaryRecnoDatabase secRecnoDB;
+ if (ifDBName == false)
+ secRecnoDB = SecondaryRecnoDatabase.Open(
+ dbSecFileName, secRecnoDBConfig);
+ else
+ secRecnoDB = SecondaryRecnoDatabase.Open(
+ dbSecFileName, "secondary",
+ secRecnoDBConfig);
+
+ // Close the secondary database.
+ secRecnoDB.Close();
+
+ // Open the existing secondary database.
+ SecondaryDatabaseConfig secDBConfig =
+ new SecondaryDatabaseConfig(
+ primaryDB, null);
+
+ SecondaryDatabase secDB;
+ if (ifDBName == false)
+ secDB = SecondaryRecnoDatabase.Open(
+ dbSecFileName, secDBConfig);
+ else
+ secDB = SecondaryRecnoDatabase.Open(
+ dbSecFileName, "secondary", secDBConfig);
+
+ // Close secondary database.
+ secDB.Close();
+ }
+ catch (DatabaseException)
+ {
+ throw new TestException();
+ }
+ finally
+ {
+ // Close primary database.
+ primaryDB.Close();
+ }
+ }
+
+ [Test]
+ public void TestOpenWithinTxn()
+ {
+ testName = "TestOpenWithinTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ OpenSecRecnoDBWithinTxn(testFixtureName,
+ "TestOpen", testHome, dbFileName,
+ dbSecFileName, false);
+ }
+
+ [Test]
+ public void TestOpenDBNameWithinTxn()
+ {
+ testName = "TestOpenDBNameWithinTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+ string dbSecFileName = testName + "_sec.db";
+
+ Configuration.ClearDir(testHome);
+
+ OpenSecRecnoDBWithinTxn(testFixtureName,
+ "TestOpen", testHome, dbFileName,
+ dbSecFileName, true);
+ }
+
+ public void OpenSecRecnoDBWithinTxn(string className,
+ string funName, string home, string dbFileName,
+ string dbSecFileName, bool ifDbName)
+ {
+ XmlElement xmlElem = Configuration.TestSetUp(
+ className, funName);
+
+ // Open an environment.
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+ envConfig.UseLogging = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(
+ home, envConfig);
+
+ // Open a primary recno database.
+ Transaction openDBTxn = env.BeginTransaction();
+ RecnoDatabaseConfig dbConfig =
+ new RecnoDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+
+ dbConfig.Env = env;
+ RecnoDatabase db = RecnoDatabase.Open(
+ dbFileName, dbConfig, openDBTxn);
+ openDBTxn.Commit();
+
+ // Open a secondary recno database.
+ Transaction openSecTxn = env.BeginTransaction();
+ SecondaryRecnoDatabaseConfig secDBConfig =
+ new SecondaryRecnoDatabaseConfig(db,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ SecondaryRecnoDatabaseConfigTest.Config(xmlElem,
+ ref secDBConfig, false);
+ secDBConfig.Env = env;
+ SecondaryRecnoDatabase secDB;
+ if (ifDbName == false)
+ secDB = SecondaryRecnoDatabase.Open(
+ dbSecFileName, secDBConfig, openSecTxn);
+ else
+ secDB = SecondaryRecnoDatabase.Open(
+ dbSecFileName, "secondary", secDBConfig,
+ openSecTxn);
+ openSecTxn.Commit();
+
+ // Confirm the flags configured in secDBConfig.
+ Confirm(xmlElem, secDB, true);
+ secDB.Close();
+
+ // Open the existing secondary database.
+ Transaction secTxn = env.BeginTransaction();
+ SecondaryDatabaseConfig secConfig =
+ new SecondaryDatabaseConfig(db,
+ new SecondaryKeyGenDelegate(SecondaryKeyGen));
+ secConfig.Env = env;
+
+ SecondaryDatabase secExDB;
+ if (ifDbName == false)
+ secExDB = SecondaryRecnoDatabase.Open(
+ dbSecFileName, secConfig, secTxn);
+ else
+ secExDB = SecondaryRecnoDatabase.Open(
+ dbSecFileName, "secondary", secConfig,
+ secTxn);
+ secExDB.Close();
+ secTxn.Commit();
+
+ db.Close();
+ env.Close();
+ }
+
+ public DatabaseEntry SecondaryKeyGen(
+ DatabaseEntry key, DatabaseEntry data)
+ {
+ DatabaseEntry dbtGen;
+ dbtGen = new DatabaseEntry(data.Data);
+ return dbtGen;
+ }
+
+ public static void Confirm(XmlElement xmlElem,
+ SecondaryRecnoDatabase secDB, bool compulsory)
+ {
+ Configuration.ConfirmInt(xmlElem, "Delimiter",
+ secDB.Delimiter, compulsory);
+ Configuration.ConfirmUint(xmlElem, "Length",
+ secDB.Length, compulsory);
+ Configuration.ConfirmInt(xmlElem, "PadByte",
+ secDB.PadByte, compulsory);
+ Configuration.ConfirmBool(xmlElem,
+ "Renumber", secDB.Renumber, compulsory);
+ Configuration.ConfirmBool(xmlElem, "Snapshot",
+ secDB.Snapshot, compulsory);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/SequenceConfigTest.cs b/db-4.8.30/test/scr037/SequenceConfigTest.cs
new file mode 100644
index 0000000..d8e4bc0
--- /dev/null
+++ b/db-4.8.30/test/scr037/SequenceConfigTest.cs
@@ -0,0 +1,132 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class SequenceConfigTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "SequenceConfigTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+
+ [Test]
+ public void TestConfig()
+ {
+ testName = "TestConfig";
+
+ SequenceConfig seqConfig = new SequenceConfig();
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ Config(xmlElem, ref seqConfig, true);
+ Confirm(xmlElem, seqConfig, true);
+ }
+
+ [Test]
+ public void TestConfigObj()
+ {
+ testName = "TestConfigObj";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a database.
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ dbFileName, btreeDBConfig);
+
+ /* Configure and initialize sequence. */
+ SequenceConfig seqConfig = new SequenceConfig();
+ seqConfig.BackingDatabase = btreeDB;
+ seqConfig.Creation = CreatePolicy.IF_NEEDED;
+ seqConfig.key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ seqConfig.SetRange(Int64.MinValue, Int64.MaxValue);
+ Sequence seq = new Sequence(seqConfig);
+
+ // Confirm the objects set in SequenceConfig.
+ Assert.AreEqual(dbFileName,
+ seq.BackingDatabase.FileName);
+ Assert.AreEqual(ASCIIEncoding.ASCII.GetBytes("key"),
+ seq.Key.Data);
+ Assert.AreEqual(Int64.MinValue, seq.Min);
+ Assert.AreEqual(Int64.MaxValue, seq.Max);
+
+ /* Close sequence, database and environment. */
+ seq.Close();
+ btreeDB.Close();
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ SequenceConfig seqConfig, bool compulsory)
+ {
+ Configuration.ConfirmInt(xmlElement, "CacheSize",
+ seqConfig.CacheSize, compulsory);
+ Configuration.ConfirmCreatePolicy(xmlElement, "Creation",
+ seqConfig.Creation, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Decrement",
+ seqConfig.Decrement, compulsory);
+ Configuration.ConfirmBool(xmlElement, "FreeThreaded",
+ seqConfig.FreeThreaded, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Increment",
+ seqConfig.Increment, compulsory);
+ Configuration.ConfirmLong(xmlElement, "InitialValue",
+ seqConfig.InitialValue, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Wrap",
+ seqConfig.Wrap, compulsory);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref SequenceConfig seqConfig, bool compulsory)
+ {
+ int intValue = new int();
+ bool boolValue = new bool();
+ long longValue = new long();
+
+ if (Configuration.ConfigInt(xmlElement, "CacheSize",
+ ref intValue, compulsory))
+ seqConfig.CacheSize = intValue;
+ Configuration.ConfigCreatePolicy(xmlElement, "Creation",
+ ref seqConfig.Creation, compulsory);
+ if (Configuration.ConfigBool(xmlElement, "Decrement",
+ ref boolValue, compulsory))
+ seqConfig.Decrement = boolValue;
+ Configuration.ConfigBool(xmlElement, "FreeThreaded",
+ ref seqConfig.FreeThreaded, compulsory);
+ if (Configuration.ConfigBool(xmlElement, "Increment",
+ ref boolValue, compulsory))
+ seqConfig.Increment = boolValue;
+ if (Configuration.ConfigLong(xmlElement, "InitialValue",
+ ref longValue, compulsory))
+ seqConfig.InitialValue = longValue;
+ Configuration.ConfigBool(xmlElement, "Wrap",
+ ref seqConfig.Wrap, compulsory);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/SequenceTest.cs b/db-4.8.30/test/scr037/SequenceTest.cs
new file mode 100644
index 0000000..92f8cc0
--- /dev/null
+++ b/db-4.8.30/test/scr037/SequenceTest.cs
@@ -0,0 +1,406 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class SequenceTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "SequenceTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ Configuration.ClearDir(testFixtureHome);
+ }
+
+ [Test]
+ public void TestConfig()
+ {
+ testName = "TestConfig";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+
+ Configuration.ClearDir(testHome);
+
+ // Open a database.
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(
+ dbFileName, btreeDBConfig);
+
+ // Configure and initialize sequence.
+ SequenceConfig seqConfig = new SequenceConfig();
+ seqConfig.BackingDatabase = btreeDB;
+ seqConfig.Creation = CreatePolicy.IF_NEEDED;
+ seqConfig.key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ seqConfig.SetRange(Int64.MinValue, Int64.MaxValue);
+ SequenceConfigTest.Config(xmlElem, ref seqConfig, true);
+ Sequence seq = new Sequence(seqConfig);
+
+
+ /*
+ * Confirm that the sequence is opened with the
+ * configuration that we set.
+ */
+ Confirm(xmlElem, seq, true);
+
+ /* Close sequence, database and environment. */
+ seq.Close();
+ btreeDB.Close();
+ }
+
+ [Test]
+ public void TestClose()
+ {
+ testName = "TestClose";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabase db;
+ Sequence seq;
+ OpenNewSequence(dbFileName, out db, out seq);
+ seq.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestDispose()
+ {
+ testName = "TestDispose";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ BTreeDatabase db;
+ Sequence seq;
+ OpenNewSequence(dbFileName, out db, out seq);
+ seq.Dispose();
+ db.Close();
+ }
+
+ [Test]
+ public void TestGet()
+ {
+ testName = "TestGet";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a database and an increasing sequence.
+ BTreeDatabase db;
+ Sequence seq;
+ OpenNewSequence(dbFileName, out db, out seq);
+
+ /*
+ * Check the delta between two sequence numbers
+ * obtained from the sequence.
+ */
+ int delta = 100;
+ long seqNum1 = seq.Get(delta);
+ long seqNum2 = seq.Get(delta);
+ Assert.AreEqual(delta, seqNum2 - seqNum1);
+
+ // Close all.
+ seq.Close();
+ db.Close();
+ }
+
+ [Test]
+ public void TestGetWithNoSync()
+ {
+ testName = "TestGetWithNoSync";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a database and an increasing sequence.
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ Sequence seq;
+ OpenNewSequenceInEnv(testHome, testName, out env,
+ out db, out seq);
+
+ /*
+ * Check the delta between two sequence numbers
+ * obtained from the sequence.
+ */
+ int delta = 100;
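+ // Pass true to request a no-sync get; the sequence record is not flushed to disk.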
+ long seqNum1 = seq.Get(delta, true);
+ long seqNum2 = seq.Get(delta, true);
+ Assert.AreEqual(delta, seqNum2 - seqNum1);
+
+ // Close all.
+ seq.Close();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestGetWithinTxn()
+ {
+ testName = "TestGetWithinTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a database and an increasing sequence.
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ Sequence seq;
+ OpenNewSequenceInEnv(testHome, testName, out env,
+ out db, out seq);
+
+ /*
+ * Check the delta between two sequence numbers
+ * obtained from the sequence.
+ */
+ int delta = 100;
+ Transaction txn = env.BeginTransaction();
+ long seqNum1 = seq.Get(delta, txn);
+ long seqNum2 = seq.Get(delta, txn);
+ Assert.AreEqual(delta, seqNum2 - seqNum1);
+ txn.Commit();
+
+ // Close all.
+ seq.Close();
+ db.Close();
+ env.Close();
+ }
+
+
+ [Test]
+ public void TestRemove()
+ {
+ testName = "TestRemove";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a database and an increasing sequence.
+ BTreeDatabase db;
+ Sequence seq;
+ OpenNewSequence(dbFileName, out db, out seq);
+
+ /*
+ * Remove the sequence. The sequence handle cannot
+ * be accessed again after the removal.
+ */
+ seq.Remove();
+ db.Close();
+ }
+
+ [Test]
+ public void TestRemoveWithNoSync()
+ {
+ testName = "TestRemoveWithNoSync";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a database and an increasing sequence.
+ DatabaseEnvironmentConfig envConfig = new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseLogging = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ DatabaseEnvironment env = DatabaseEnvironment.Open(testHome, envConfig);
+
+ /* Configure and open sequence's database. */
+ BTreeDatabaseConfig btreeDBConfig = new BTreeDatabaseConfig();
+ btreeDBConfig.AutoCommit = true;
+ btreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ btreeDBConfig.Env = env;
+ BTreeDatabase btreeDB = BTreeDatabase.Open(dbFileName, btreeDBConfig);
+
+ /* Configure and initialize sequence. */
+ SequenceConfig seqConfig = new SequenceConfig();
+ seqConfig.BackingDatabase = btreeDB;
+ seqConfig.Creation = CreatePolicy.IF_NEEDED;
+ seqConfig.Increment = true;
+ seqConfig.InitialValue = Int64.MaxValue;
+ seqConfig.SetRange(Int64.MinValue, Int64.MaxValue);
+ seqConfig.Wrap = true;
+ seqConfig.key = new DatabaseEntry();
+ seqConfig.key.Data = ASCIIEncoding.ASCII.GetBytes("ex_csharp_sequence");
+ Sequence seq = new Sequence(seqConfig);
+
+ /*
+ * Remove the sequence without flushing to disk (no-sync).
+ * The sequence handle cannot be accessed again afterwards.
+ */
+ seq.Remove(true);
+ btreeDB.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestRemoveWithInTxn()
+ {
+ testName = "TestRemoveWithInTxn";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a database and an increasing sequence.
+ BTreeDatabase db;
+ DatabaseEnvironment env;
+ Sequence seq;
+ OpenNewSequenceInEnv(testHome, testName, out env,
+ out db, out seq);
+
+ // Remove the sequence.
+ Transaction txn = env.BeginTransaction();
+ seq.Remove(txn);
+ txn.Commit();
+ db.Close();
+ env.Close();
+ }
+
+ [Test]
+ public void TestStats()
+ {
+ testName = "TestStats";
+ testHome = testFixtureHome + "/" + testName;
+ string dbFileName = testHome + "/" + testName + ".db";
+
+ Configuration.ClearDir(testHome);
+
+ // Open a database and an increasing sequence.
+ BTreeDatabase db;
+ Sequence seq;
+ OpenNewSequence(dbFileName, out db, out seq);
+
+ // Get a value from sequence.
+ seq.Get(100);
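+ /*
+ * With InitialValue 100 and CacheSize 1000, the Get above
+ * consumes values 100..199, so the next cached value is 200,
+ * the cache extends to 1099, and 1100 is stored on disk.
+ */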
+
+ // Get sequence statistics.
+ SequenceStats stats = seq.Stats();
+ seq.PrintStats(true);
+ Assert.AreEqual(200, stats.CachedValue);
+ Assert.AreEqual(1000, stats.CacheSize);
+ Assert.AreNotEqual(0, stats.Flags);
+ Assert.AreEqual(1099, stats.LastCachedValue);
+ Assert.AreEqual(Int64.MaxValue, stats.Max);
+ Assert.AreEqual(Int64.MinValue, stats.Min);
+ Assert.AreEqual(1100, stats.StoredValue);
+
+ stats = seq.Stats(true);
+ seq.PrintStats();
+ stats = seq.Stats();
+ Assert.AreEqual(0, stats.LockNoWait);
+ Assert.AreEqual(0, stats.LockWait);
+
+ seq.Close();
+ db.Close();
+ }
+
+ public void OpenNewSequence(string dbFileName,
+ out BTreeDatabase db, out Sequence seq)
+ {
+ // Open a database.
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ db = BTreeDatabase.Open(dbFileName, btreeDBConfig);
+
+ // Configure and initialize sequence.
+ SequenceConfig seqConfig = new SequenceConfig();
+ seqConfig.BackingDatabase = db;
+ seqConfig.CacheSize = 1000;
+ seqConfig.Creation = CreatePolicy.ALWAYS;
+ seqConfig.Decrement = false;
+ seqConfig.FreeThreaded = true;
+ seqConfig.Increment = true;
+ seqConfig.InitialValue = 100;
+ seqConfig.key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ seqConfig.SetRange(Int64.MinValue, Int64.MaxValue);
+ seqConfig.Wrap = true;
+ seq = new Sequence(seqConfig);
+ }
+
+ public void OpenNewSequenceInEnv(string home, string dbname,
+ out DatabaseEnvironment env, out BTreeDatabase db,
+ out Sequence seq)
+ {
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+ envConfig.UseLogging = true;
+ env = DatabaseEnvironment.Open(home, envConfig);
+
+ Transaction openTxn = env.BeginTransaction();
+ BTreeDatabaseConfig dbConfig =
+ new BTreeDatabaseConfig();
+ dbConfig.Creation = CreatePolicy.IF_NEEDED;
+ dbConfig.Env = env;
+ db = BTreeDatabase.Open(dbname + ".db", dbConfig,
+ openTxn);
+ openTxn.Commit();
+
+ Transaction seqTxn = env.BeginTransaction();
+ SequenceConfig seqConfig = new SequenceConfig();
+ seqConfig.BackingDatabase = db;
+ seqConfig.Creation = CreatePolicy.ALWAYS;
+ seqConfig.Decrement = false;
+ seqConfig.FreeThreaded = true;
+ seqConfig.Increment = true;
+ seqConfig.InitialValue = 0;
+ seqConfig.key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ seqConfig.SetRange(Int64.MinValue, Int64.MaxValue);
+ seqConfig.Wrap = true;
+ seq = new Sequence(seqConfig);
+ seqTxn.Commit();
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ Sequence seq, bool compulsory)
+ {
+ Configuration.ConfirmInt(xmlElement, "CacheSize",
+ seq.Cachesize, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Decrement",
+ seq.Decrement, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Increment",
+ seq.Increment, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Wrap",
+ seq.Wrap, compulsory);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/TestException.cs b/db-4.8.30/test/scr037/TestException.cs
new file mode 100644
index 0000000..adf6e75
--- /dev/null
+++ b/db-4.8.30/test/scr037/TestException.cs
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace CsharpAPITest
+{
+
+ public class TestException : ApplicationException
+ {
+ public TestException(string name)
+ : base(name)
+ {
+ }
+
+ public TestException() : base()
+ {
+ }
+ }
+
+ public class ConfigNotFoundException : TestException
+ {
+ public ConfigNotFoundException(string name)
+ : base(name)
+ {
+ }
+ }
+
+ public class InvalidConfigException : TestException
+ {
+ public InvalidConfigException(string name)
+ : base(name)
+ {
+ }
+ }
+
+ public class ExpectedTestException : TestException
+ {
+ public ExpectedTestException()
+ : base()
+ {
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/TransactionConfigTest.cs b/db-4.8.30/test/scr037/TransactionConfigTest.cs
new file mode 100644
index 0000000..8f84da2
--- /dev/null
+++ b/db-4.8.30/test/scr037/TransactionConfigTest.cs
@@ -0,0 +1,73 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class TransactionConfigTest
+ {
+ private string testFixtureName;
+ private string testName;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "TransactionConfigTest";
+ }
+
+ [Test]
+ public void TestConfig()
+ {
+ testName = "TestConfig";
+ /*
+ * Configure the fields/properties and see if
+ * they are updated successfully.
+ */
+ TransactionConfig txnConfig = new TransactionConfig();
+ XmlElement xmlElem = Configuration.TestSetUp(
+ testFixtureName, testName);
+ Config(xmlElem, ref txnConfig, true);
+ Confirm(xmlElem, txnConfig, true);
+ }
+
+ public static void Confirm(XmlElement xmlElement,
+ TransactionConfig txnConfig, bool compulsory)
+ {
+ Configuration.ConfirmIsolation(xmlElement,
+ "IsolationDegree", txnConfig.IsolationDegree,
+ compulsory);
+ Configuration.ConfirmBool(xmlElement, "NoWait",
+ txnConfig.NoWait, compulsory);
+ Configuration.ConfirmBool(xmlElement, "Snapshot",
+ txnConfig.Snapshot, compulsory);
+ Configuration.ConfirmLogFlush(xmlElement, "SyncAction",
+ txnConfig.SyncAction, compulsory);
+ }
+
+ public static void Config(XmlElement xmlElement,
+ ref TransactionConfig txnConfig, bool compulsory)
+ {
+ Configuration.ConfigIsolation(xmlElement,
+ "IsolationDegree", ref txnConfig.IsolationDegree,
+ compulsory);
+ Configuration.ConfigBool(xmlElement, "NoWait",
+ ref txnConfig.NoWait, compulsory);
+ Configuration.ConfigBool(xmlElement, "Snapshot",
+ ref txnConfig.Snapshot, compulsory);
+ Configuration.ConfigLogFlush(xmlElement, "SyncAction",
+ ref txnConfig.SyncAction, compulsory);
+ }
+ }
+}
diff --git a/db-4.8.30/test/scr037/TransactionTest.cs b/db-4.8.30/test/scr037/TransactionTest.cs
new file mode 100644
index 0000000..23ca34c
--- /dev/null
+++ b/db-4.8.30/test/scr037/TransactionTest.cs
@@ -0,0 +1,435 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Xml;
+using NUnit.Framework;
+using BerkeleyDB;
+
+namespace CsharpAPITest
+{
+ [TestFixture]
+ public class TransactionTest
+ {
+ private string testFixtureHome;
+ private string testFixtureName;
+ private string testName;
+ private string testHome;
+
+ private DatabaseEnvironment deadLockEnv;
+
+ [TestFixtureSetUp]
+ public void RunBeforeTests()
+ {
+ testFixtureName = "TransactionTest";
+ testFixtureHome = "./TestOut/" + testFixtureName;
+
+ DatabaseEnvironment.Remove(testFixtureHome);
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestAbort()
+ {
+ testName = "TestAbort";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironment env;
+ Transaction txn;
+ BTreeDatabase db;
+
+			/*
+			 * Open an environment and begin a transaction. Open
+			 * a db and write a record to the db within this transaction.
+			 */
+ PutRecordWithTxn(out env, testHome, testName, out txn);
+
+ // Abort the transaction.
+ txn.Abort();
+
+			/*
+			 * The abort undid all operations in the transaction,
+			 * so the database should not exist and cannot be reopened.
+			 */
+ try
+ {
+ OpenBtreeDBInEnv(testName + ".db", env,
+ out db, false, null);
+ }
+ catch (DatabaseException)
+ {
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ env.Close();
+ }
+ }
+
+ [Test]
+ public void TestCommit()
+ {
+ testName = "TestCommit";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ DatabaseEnvironment env;
+ Transaction txn;
+ BTreeDatabase db;
+
+			/*
+			 * Open an environment and begin a transaction. Open
+			 * a db and write a record to the db within this transaction.
+			 */
+ PutRecordWithTxn(out env, testHome, testName, out txn);
+
+ // Commit the transaction.
+ txn.Commit();
+
+ // Reopen the database.
+ OpenBtreeDBInEnv(testName + ".db", env,
+ out db, false, null);
+
+ /*
+ * Confirm that the record("key", "data") exists in the
+ * database.
+ */
+ try
+ {
+ db.GetBoth(new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key")),
+ new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data")));
+ }
+ catch (DatabaseException)
+ {
+ throw new TestException();
+ }
+ finally
+ {
+ db.Close();
+ env.Close();
+ }
+ }
+
+ [Test, ExpectedException(typeof(ExpectedTestException))]
+ public void TestDiscard()
+ {
+ DatabaseEnvironment env;
+ byte[] gid;
+
+ testName = "TestDiscard";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+			/*
+			 * Open an environment and begin a transaction
+			 * named "Transaction". Within the transaction, open a
+			 * database, write a record and close it. Then prepare
+			 * the transaction and panic the environment.
+			 */
+ PanicPreparedTxn(testHome, testName, out env, out gid);
+
+			/*
+			 * Recover the environment. Log and db files are not
+			 * destroyed, so run normal recovery. Recovery should
+			 * use the DB_CREATE and DB_INIT_TXN flags when
+			 * opening the environment.
+			 */
+ DatabaseEnvironmentConfig envConfig
+ = new DatabaseEnvironmentConfig();
+ envConfig.RunRecovery = true;
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+ env = DatabaseEnvironment.Open(testHome, envConfig);
+
+ PreparedTransaction[] preparedTxns
+ = new PreparedTransaction[10];
+ preparedTxns = env.Recover(10, true);
+
+ Assert.AreEqual(gid, preparedTxns[0].GlobalID);
+ preparedTxns[0].Txn.Discard();
+ try
+ {
+ preparedTxns[0].Txn.Commit();
+ }
+ catch (AccessViolationException)
+ {
+ throw new ExpectedTestException();
+ }
+ finally
+ {
+ env.Close();
+ }
+ }
+
+ [Test]
+ public void TestPrepare()
+ {
+ testName = "TestPrepare";
+ testHome = testFixtureHome + "/" + testName;
+
+ DatabaseEnvironment env;
+ byte[] gid;
+
+ Configuration.ClearDir(testHome);
+
+			/*
+			 * Open an environment and begin a transaction
+			 * named "Transaction". Within the transaction, open a
+			 * database, write a record and close it. Then prepare
+			 * the transaction and panic the environment.
+			 */
+ PanicPreparedTxn(testHome, testName, out env, out gid);
+
+			/*
+			 * Recover the environment. Log and db files are not
+			 * destroyed, so run normal recovery. Recovery should
+			 * use the DB_CREATE and DB_INIT_TXN flags when
+			 * opening the environment.
+			 */
+ DatabaseEnvironmentConfig envConfig
+ = new DatabaseEnvironmentConfig();
+ envConfig.RunRecovery = true;
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+ env = DatabaseEnvironment.Open(testHome, envConfig);
+
+ // Reopen the database.
+ BTreeDatabase db;
+ OpenBtreeDBInEnv(testName + ".db", env, out db,
+ false, null);
+
+ /*
+ * Confirm that record("key", "data") exists in the
+ * database.
+ */
+ DatabaseEntry key, data;
+ key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+ try
+ {
+ db.GetBoth(key, data);
+ }
+ catch (DatabaseException)
+ {
+ throw new TestException();
+ }
+ finally
+ {
+ db.Close();
+ env.Close();
+ }
+
+ }
+
+ public void PanicPreparedTxn(string home, string dbName,
+ out DatabaseEnvironment env, out byte[] globalID)
+ {
+ Transaction txn;
+
+ // Put record into database within transaction.
+ PutRecordWithTxn(out env, home, dbName, out txn);
+
+			/*
+			 * Generate a global ID for the transaction. Copy the
+			 * transaction ID into the first 4 bytes of the global ID.
+			 */
+ globalID = new byte[Transaction.GlobalIdLength];
+ byte[] txnID = new byte[4];
+ txnID = BitConverter.GetBytes(txn.Id);
+ for (int i = 0; i < txnID.Length; i++)
+ globalID[i] = txnID[i];
+
+ // Prepare the transaction.
+ txn.Prepare(globalID);
+
+ // Panic the environment.
+ env.Panic();
+
+ }
+
+ [Test]
+ public void TestTxnName()
+ {
+ DatabaseEnvironment env;
+ Transaction txn;
+
+ testName = "TestTxnName";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ SetUpTransactionalEnv(testHome, out env);
+ txn = env.BeginTransaction();
+ txn.Name = testName;
+ Assert.AreEqual(testName, txn.Name);
+ txn.Commit();
+ env.Close();
+ }
+
+ [Test]
+ public void TestSetLockTimeout()
+ {
+ testName = "TestSetLockTimeout";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+ // Set lock timeout.
+ TestTimeOut(true);
+
+ }
+
+ [Test]
+ public void TestSetTxnTimeout()
+ {
+ testName = "TestSetTxnTimeout";
+ testHome = testFixtureHome + "/" + testName;
+
+ Configuration.ClearDir(testHome);
+
+			// Set the transaction timeout.
+ TestTimeOut(false);
+
+ }
+
+		/*
+		 * ifSetLock indicates which timeout function is
+		 * exercised: SetLockTimeout or SetTxnTimeout.
+		 */
+ public void TestTimeOut(bool ifSetLock)
+ {
+ // Open environment and begin transaction.
+ Transaction txn;
+ deadLockEnv = null;
+ SetUpEnvWithTxnAndLocking(testHome,
+ out deadLockEnv, out txn, 0, 0, 0, 0);
+
+			// Define the deadlock detection and resolution policy.
+ deadLockEnv.DeadlockResolution =
+ DeadlockPolicy.YOUNGEST;
+ if (ifSetLock == true)
+ txn.SetLockTimeout(10);
+ else
+ txn.SetTxnTimeout(10);
+
+ txn.Commit();
+ deadLockEnv.Close();
+ }
+
+ public static void SetUpEnvWithTxnAndLocking(string envHome,
+ out DatabaseEnvironment env, out Transaction txn,
+ uint maxLock, uint maxLocker, uint maxObject, uint partition)
+ {
+ // Configure env and locking subsystem.
+ LockingConfig lkConfig = new LockingConfig();
+
+			/*
+			 * If a maximum number of locks, lockers, objects
+			 * or partitions is given, set it in the LockingConfig;
+			 * otherwise leave the default value in place.
+			 */
+ if (maxLock != 0)
+ lkConfig.MaxLocks = maxLock;
+ if (maxLocker != 0)
+ lkConfig.MaxLockers = maxLocker;
+ if (maxObject != 0)
+ lkConfig.MaxObjects = maxObject;
+ if (partition != 0)
+ lkConfig.Partitions = partition;
+
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseTxns = true;
+ envConfig.UseMPool = true;
+ envConfig.LockSystemCfg = lkConfig;
+ envConfig.UseLocking = true;
+ envConfig.NoLocking = false;
+ env = DatabaseEnvironment.Open(envHome, envConfig);
+ txn = env.BeginTransaction();
+ }
+
+ public void PutRecordWithTxn(out DatabaseEnvironment env,
+ string home, string dbName, out Transaction txn)
+ {
+ BTreeDatabase db;
+
+ // Open a new environment and begin a transaction.
+ SetUpTransactionalEnv(home, out env);
+ TransactionConfig txnConfig = new TransactionConfig();
+ txnConfig.Name = "Transaction";
+ txn = env.BeginTransaction(txnConfig);
+ Assert.AreEqual("Transaction", txn.Name);
+
+ // Open a new database within the transaction.
+ OpenBtreeDBInEnv(dbName + ".db", env, out db, true, txn);
+
+ // Write to the database within the transaction.
+ WriteOneIntoBtreeDBWithTxn(db, txn);
+
+ // Close the database.
+ db.Close();
+ }
+
+ public void SetUpTransactionalEnv(string home,
+ out DatabaseEnvironment env)
+ {
+ DatabaseEnvironmentConfig envConfig =
+ new DatabaseEnvironmentConfig();
+ envConfig.Create = true;
+ envConfig.UseLogging = true;
+ envConfig.UseMPool = true;
+ envConfig.UseTxns = true;
+ env = DatabaseEnvironment.Open(
+ home, envConfig);
+ }
+
+ public void OpenBtreeDBInEnv(string dbName,
+ DatabaseEnvironment env, out BTreeDatabase db,
+ bool create, Transaction txn)
+ {
+ BTreeDatabaseConfig btreeDBConfig =
+ new BTreeDatabaseConfig();
+ btreeDBConfig.Env = env;
+ if (create == true)
+ btreeDBConfig.Creation = CreatePolicy.IF_NEEDED;
+ else
+ btreeDBConfig.Creation = CreatePolicy.NEVER;
+ if (txn == null)
+ db = BTreeDatabase.Open(dbName,
+ btreeDBConfig);
+ else
+ db = BTreeDatabase.Open(dbName,
+ btreeDBConfig, txn);
+ }
+
+ public void WriteOneIntoBtreeDBWithTxn(BTreeDatabase db,
+ Transaction txn)
+ {
+ DatabaseEntry key, data;
+
+ key = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("key"));
+ data = new DatabaseEntry(
+ ASCIIEncoding.ASCII.GetBytes("data"));
+ db.Put(key, data, txn);
+ }
+ }
+}
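
The TestPrepare and TestDiscard cases above exercise the prepared-transaction path: a transaction is prepared under a global ID, the environment is panicked, and a fresh environment opened with RunRecovery hands the pending transactions back through Recover(). Below is a minimal sketch of that flow, reusing only the C# API calls that appear in the test above; the class name and home directory are illustrative and not part of the test suite.

using System;
using BerkeleyDB;

namespace CsharpAPITest
{
	// Hedged sketch: resolve transactions left prepared before a crash,
	// mirroring what TestPrepare/TestDiscard verify.
	public class RecoverPreparedSketch
	{
		public static void ResolveAll(string home)
		{
			DatabaseEnvironmentConfig cfg = new DatabaseEnvironmentConfig();
			cfg.Create = true;          // DB_CREATE
			cfg.RunRecovery = true;     // run normal recovery on open
			cfg.UseTxns = true;         // DB_INIT_TXN
			cfg.UseMPool = true;
			DatabaseEnvironment env = DatabaseEnvironment.Open(home, cfg);

			// Fetch up to 10 prepared-but-unresolved transactions.
			PreparedTransaction[] pending = env.Recover(10, true);
			foreach (PreparedTransaction p in pending)
			{
				// A real transaction manager would match p.GlobalID
				// against its own records before deciding; here every
				// recovered transaction is simply committed.
				p.Txn.Commit();
			}
			env.Close();
		}
	}
}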
diff --git a/db-4.8.30/test/scr037/XMLReader.cs b/db-4.8.30/test/scr037/XMLReader.cs
new file mode 100644
index 0000000..6bdaa3e
--- /dev/null
+++ b/db-4.8.30/test/scr037/XMLReader.cs
@@ -0,0 +1,48 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2009 Oracle. All rights reserved.
+ *
+ */
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.Xml;
+using System.Xml.XPath;
+
+namespace CsharpAPITest
+{
+ public class XMLReader
+ {
+ private static string path;
+
+ public XMLReader(string XmlFileName)
+ {
+ path = XmlFileName;
+ }
+
+ public XmlElement GetXmlElement(string className, string testName)
+ {
+ XmlDocument doc = new XmlDocument();
+ doc.Load(path);
+
+ string xpath = string.Format("/Assembly/TestFixture[@name=\"{0}\"]/Test[@name=\"{1}\"]", className, testName);
+ XmlElement testCase = doc.SelectSingleNode(xpath) as XmlElement;
+ if (testCase == null)
+ return null;
+ else
+ return testCase;
+ }
+
+ public static XmlNode GetNode(XmlElement xmlElement,
+ string nodeName)
+ {
+ XmlNodeList xmlNodeList = xmlElement.SelectNodes(nodeName);
+ if (xmlNodeList.Count > 1)
+ throw new Exception(nodeName + " Configuration Error");
+ else
+ return xmlNodeList.Item(0);
+ }
+
+ }
+}
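
XMLReader above resolves one test's configuration element by building an XPath of the form /Assembly/TestFixture[@name]/Test[@name] over the per-assembly XML file. The sketch below shows how it might be driven; the file name "AllTestData.xml" and the "NoWait" child element are assumptions inferred from that XPath and from the Config*/Confirm* helpers used elsewhere in these tests.

using System;
using System.Xml;

namespace CsharpAPITest
{
	// Hedged sketch of driving the XMLReader helper; not part of the suite.
	public class XMLReaderUsageSketch
	{
		public static XmlNode LookUpSetting()
		{
			XMLReader reader = new XMLReader("AllTestData.xml");
			XmlElement elem = reader.GetXmlElement(
			    "TransactionConfigTest", "TestConfig");
			if (elem == null)
				return null;

			// Individual settings hang off the <Test> element as child nodes.
			return XMLReader.GetNode(elem, "NoWait");
		}
	}
}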
diff --git a/db-4.8.30/test/scr037/bdb4.7.db b/db-4.8.30/test/scr037/bdb4.7.db
new file mode 100644
index 0000000..315e94a
--- /dev/null
+++ b/db-4.8.30/test/scr037/bdb4.7.db
Binary files differ
diff --git a/db-4.8.30/test/scr038/chk.bdb b/db-4.8.30/test/scr038/chk.bdb
new file mode 100644
index 0000000..73007cd
--- /dev/null
+++ b/db-4.8.30/test/scr038/chk.bdb
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+BDB_DIR="../../build_unix"
+rm -rf ./numismatics
+
+$BDB_DIR/db_sql -i data/all_data_types.sql -o sample.c -v sample_verify.c
+cc -g -I$BDB_DIR -L$BDB_DIR/.libs -o sample sample.c sample_verify.c $BDB_DIR/libdb.a -lpthread
+mkdir numismatics
+LD_LIBRARY_PATH=$BDB_DIR/.libs ./sample
+
diff --git a/db-4.8.30/test/scr038/data/17925.sql b/db-4.8.30/test/scr038/data/17925.sql
new file mode 100644
index 0000000..d76949d
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/17925.sql
@@ -0,0 +1,47 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR2(20),
+ value NUMERIC(8,2),
+ mintage_year INT(8),
+ mint_id INT(8),
+ CONSTRAINT mint_id_fk FOREIGN KEY(mint_id)
+ REFERENCES mint(mid));
+
+CREATE INDEX unit_index ON coin(unit);
+
+CREATE TABLE mint (mid INT(8) PRIMARY KEY,
+ country VARCHAR2(20),
+ city VARCHAR2(20));
+
+CREATE INDEX mid_index ON mint(mid);
+
+CREATE TABLE random (rid VARCHAR2(20) PRIMARY KEY,
+ chunk bin(127));
+
+CREATE TABLE table1 (att_int INT(8) PRIMARY KEY,
+ att_char CHAR(20),
+ att_varchar VARCHAR(20),
+ att_bit BIT,
+ att_tinyint TINYINT,
+ att_smallint SMALLINT(2),
+ att_integer INTEGER(4),
+ att_bigint BIGINT,
+ att_real REAL,
+ att_double DOUBLE,
+ att_float FLOAT,
+ att_decimal DECIMAL,
+ att_numeric NUMERIC,
+ att_bin bin(5));
+
+CREATE TABLE table2(att_int INT(8) PRIMARY KEY);
+
+CREATE TABLE table3(att_char CHAR(20) PRIMARY KEY);
+
+CREATE TABLE table4(att_bin bin(10) PRIMARY KEY);
+
+CREATE TABLE table5(att_bin bin(10) PRIMARY KEY,
+ att_bit BIT);
+
+CREATE TABLE table6(att_bin bin(10),
+ att_bit BIT PRIMARY KEY);
diff --git a/db-4.8.30/test/scr038/data/all_data_types.sql b/db-4.8.30/test/scr038/data/all_data_types.sql
new file mode 100644
index 0000000..0e8584e
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/all_data_types.sql
@@ -0,0 +1,17 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_int INT(8) PRIMARY KEY,
+ att_char CHAR(20),
+ att_varchar VARCHAR(20),
+ att_bit BIT,
+ att_tinyint TINYINT,
+ att_smallint SMALLINT(2),
+ att_integer INTEGER(4),
+ att_bigint BIGINT,
+ att_real REAL,
+ att_double DOUBLE,
+ att_float FLOAT,
+ att_decimal DECIMAL,
+ att_numeric NUMERIC,
+ att_bin bin(5));
diff --git a/db-4.8.30/test/scr038/data/bigint_table.sql b/db-4.8.30/test/scr038/data/bigint_table.sql
new file mode 100644
index 0000000..5488cf3
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/bigint_table.sql
@@ -0,0 +1,4 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_bigint BIGINT PRIMARY KEY); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/bin_table.sql b/db-4.8.30/test/scr038/data/bin_table.sql
new file mode 100644
index 0000000..9f44fc4
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/bin_table.sql
@@ -0,0 +1,3 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_bin bin(10) PRIMARY KEY);
diff --git a/db-4.8.30/test/scr038/data/bit_table.sql b/db-4.8.30/test/scr038/data/bit_table.sql
new file mode 100644
index 0000000..68b5d14
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/bit_table.sql
@@ -0,0 +1,4 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_bit BIT PRIMARY KEY); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/btree_table.sql b/db-4.8.30/test/scr038/data/btree_table.sql
new file mode 100644
index 0000000..17354e0
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/btree_table.sql
@@ -0,0 +1,6 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE mint (mid INT(8)PRIMARY KEY,
+ country VARCHAR(20),
+ city VARCHAR(20)); --+ DBTYPE = BTREE
+
diff --git a/db-4.8.30/test/scr038/data/cachesize.sql b/db-4.8.30/test/scr038/data/cachesize.sql
new file mode 100644
index 0000000..b578b08
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/cachesize.sql
@@ -0,0 +1,4 @@
+CREATE DATABASE numismatics; /*+ CACHESIZE = 16m */
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20));
diff --git a/db-4.8.30/test/scr038/data/char_length.sql b/db-4.8.30/test/scr038/data/char_length.sql
new file mode 100644
index 0000000..45f8e68
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/char_length.sql
@@ -0,0 +1,5 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_int INT(8) PRIMARY KEY,
+ att_char CHAR(2)); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/char_table.sql b/db-4.8.30/test/scr038/data/char_table.sql
new file mode 100644
index 0000000..1db937e
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/char_table.sql
@@ -0,0 +1,4 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_char CHAR(20) PRIMARY KEY); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/column_name_conflict_with_table.sql b/db-4.8.30/test/scr038/data/column_name_conflict_with_table.sql
new file mode 100644
index 0000000..aa9d21f
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/column_name_conflict_with_table.sql
@@ -0,0 +1,4 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE coin (coin INT(8) PRIMARY KEY,
+ unit VARCHAR(20)); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/commented_dml.sql b/db-4.8.30/test/scr038/data/commented_dml.sql
new file mode 100644
index 0000000..4fc7c94
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/commented_dml.sql
@@ -0,0 +1,6 @@
+CREATE DATABASE numismatics; /*+ CACHESIZE = 16m */
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20));
+
+/* SELECT * from mint;*/
diff --git a/db-4.8.30/test/scr038/data/comments_at_the_end.sql b/db-4.8.30/test/scr038/data/comments_at_the_end.sql
new file mode 100644
index 0000000..8ae5387
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/comments_at_the_end.sql
@@ -0,0 +1,6 @@
+CREATE DATABASE numismatics; /*+ CACHESIZE = 16m */
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20));
+
+/* This is the end of sql. */
diff --git a/db-4.8.30/test/scr038/data/constraints_name_conflict_with_column.sql b/db-4.8.30/test/scr038/data/constraints_name_conflict_with_column.sql
new file mode 100644
index 0000000..5f2a4a6
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/constraints_name_conflict_with_column.sql
@@ -0,0 +1,13 @@
+CREATE DATABASE numismatics;
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ value NUMERIC(8,2),
+ mintage_year INT(8),
+ mint_id INT(8),
+ CONSTRAINT unit FOREIGN KEY(mint_id)
+ REFERENCES mint(mid));
+
+CREATE TABLE mint (mid INT(8)PRIMARY KEY,
+ country VARCHAR(20),
+ city VARCHAR(20));
+
diff --git a/db-4.8.30/test/scr038/data/constraints_on_nonexistent_column.sql b/db-4.8.30/test/scr038/data/constraints_on_nonexistent_column.sql
new file mode 100644
index 0000000..c7a5483
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/constraints_on_nonexistent_column.sql
@@ -0,0 +1,14 @@
+CREATE DATABASE numismatics;
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ value NUMERIC(8,2),
+ mintage_year INT(8),
+ mint_id INT(8),
+ CONSTRAINT mint_id_fk FOREIGN KEY(mint_id)
+ REFERENCES mint(mmid));
+
+CREATE TABLE mint (mid INT(8) PRIMARY KEY,
+ country VARCHAR(20),
+ city VARCHAR(20));
+
+
diff --git a/db-4.8.30/test/scr038/data/constraints_on_nonexistent_table.sql b/db-4.8.30/test/scr038/data/constraints_on_nonexistent_table.sql
new file mode 100644
index 0000000..1147df6
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/constraints_on_nonexistent_table.sql
@@ -0,0 +1,9 @@
+CREATE DATABASE numismatics;
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ value NUMERIC(8,2),
+ mintage_year INT(8),
+ mint_id INT(8),
+ CONSTRAINT mint_id_fk FOREIGN KEY(mint_id)
+ REFERENCES mint(mid));
+
diff --git a/db-4.8.30/test/scr038/data/database_without_name.sql b/db-4.8.30/test/scr038/data/database_without_name.sql
new file mode 100644
index 0000000..00134e5
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/database_without_name.sql
@@ -0,0 +1,9 @@
+CREATE DATABASE;
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ value NUMERIC(8,2), -- just a comment here;
+ mintage_year INT(8),
+ mint_id INT(8),
+ CONSTRAINT mint_id_fk FOREIGN KEY(mint_id)
+ REFERENCES mint(mid));
diff --git a/db-4.8.30/test/scr038/data/decimal_table.sql b/db-4.8.30/test/scr038/data/decimal_table.sql
new file mode 100644
index 0000000..f485eee
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/decimal_table.sql
@@ -0,0 +1,4 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_decimal DECIMAL PRIMARY KEY); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/dml.sql b/db-4.8.30/test/scr038/data/dml.sql
new file mode 100644
index 0000000..96a8f5b
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/dml.sql
@@ -0,0 +1,6 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20));
+
+SELECT * from mint;
diff --git a/db-4.8.30/test/scr038/data/double_table.sql b/db-4.8.30/test/scr038/data/double_table.sql
new file mode 100644
index 0000000..394b72d
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/double_table.sql
@@ -0,0 +1,4 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_double DOUBLE PRIMARY KEY); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/float_table.sql b/db-4.8.30/test/scr038/data/float_table.sql
new file mode 100644
index 0000000..e04d0e0
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/float_table.sql
@@ -0,0 +1,4 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_float FLOAT PRIMARY KEY); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/hash_table.sql b/db-4.8.30/test/scr038/data/hash_table.sql
new file mode 100644
index 0000000..229c592
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/hash_table.sql
@@ -0,0 +1,7 @@
+CREATE DATABASE numismatics; /*+ CACHESIZE = 16m */
+
+CREATE TABLE mint (mid INT(8) PRIMARY KEY,
+ country VARCHAR(20),
+ city VARCHAR(20)); --+ DBTYPE = HASH
+
+
diff --git a/db-4.8.30/test/scr038/data/index_name_conflict_with_column.sql b/db-4.8.30/test/scr038/data/index_name_conflict_with_column.sql
new file mode 100644
index 0000000..d941eaf
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/index_name_conflict_with_column.sql
@@ -0,0 +1,18 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ value NUMERIC(8,2),
+ mintage_year INT(8),
+ mint_id INT(8),
+ CONSTRAINT mint_id_fk FOREIGN KEY(mint_id)
+ REFERENCES mint(mid));
+
+CREATE TABLE mint (mid INT(8) PRIMARY KEY,
+ country VARCHAR(20),
+ city VARCHAR(20));
+
+CREATE INDEX coin ON coin(unit);
+
+
+
diff --git a/db-4.8.30/test/scr038/data/index_without_name.sql b/db-4.8.30/test/scr038/data/index_without_name.sql
new file mode 100644
index 0000000..1c2b88d
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/index_without_name.sql
@@ -0,0 +1,8 @@
+CREATE DATABASE numismatics; /*+ CACHESIZE = 16m */
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20));
+
+
+CREATE INDEX ON coin(unit);
+
diff --git a/db-4.8.30/test/scr038/data/int_table.sql b/db-4.8.30/test/scr038/data/int_table.sql
new file mode 100644
index 0000000..7aa82d5
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/int_table.sql
@@ -0,0 +1,4 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_int INT PRIMARY KEY); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/integer_table.sql b/db-4.8.30/test/scr038/data/integer_table.sql
new file mode 100644
index 0000000..5913866
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/integer_table.sql
@@ -0,0 +1,4 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_integer INTEGER PRIMARY KEY); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/many_tables.sql b/db-4.8.30/test/scr038/data/many_tables.sql
new file mode 100644
index 0000000..44a7648
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/many_tables.sql
@@ -0,0 +1,93 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_int INT(8) PRIMARY KEY,
+ att_char CHAR(2),
+ att_varchar VARCHAR(20),
+ att_bit BIT,
+ att_tinyint TINYINT,
+ att_smallint SMALLINT(2),
+ att_integer INTEGER(4),
+ att_bigint BIGINT,
+ att_real REAL,
+ att_double DOUBLE,
+ att_float FLOAT,
+ att_decimal DECIMAL,
+ att_numeric NUMERIC,
+ att_bin bin(10),
+ CONSTRAINT table8_fk FOREIGN KEY(att_integer)
+ REFERENCES table8(att_integer));
+
+CREATE TABLE table2 (att_int INT(8) PRIMARY KEY);
+
+CREATE TABLE table3 (att_char CHAR(2) PRIMARY KEY);
+
+CREATE TABLE table4 (att_varchar VARCHAR(20) PRIMARY KEY);
+
+CREATE TABLE table5 (att_bit BIT PRIMARY KEY);
+
+CREATE TABLE table6 (att_tinyint TINYINT PRIMARY KEY);
+
+CREATE TABLE table7 (att_smallint SMALLINT(2) PRIMARY KEY);
+
+CREATE TABLE table8 (att_integer INTEGER(4) PRIMARY KEY);
+
+CREATE TABLE table9 (att_bigint BIGINT PRIMARY KEY);
+
+CREATE TABLE table10 (att_real REAL PRIMARY KEY);
+
+CREATE TABLE table11 (att_double DOUBLE PRIMARY KEY);
+
+CREATE TABLE table12 (att_float FLOAT PRIMARY KEY);
+
+CREATE TABLE table13 (att_decimal DECIMAL PRIMARY KEY);
+
+CREATE TABLE table14 (att_numeric NUMERIC PRIMARY KEY);
+
+CREATE TABLE table15 (att_binary bin(10) PRIMARY KEY);
+
+CREATE TABLE table16 (att_int INT(8) PRIMARY KEY,
+ att_char CHAR(2),
+ att_varchar VARCHAR(20),
+ att_bit BIT,
+ att_tinyint TINYINT,
+ att_smallint SMALLINT(2),
+ att_integer INTEGER(4),
+ att_bigint BIGINT,
+ att_real REAL,
+ att_double DOUBLE,
+ att_float FLOAT,
+ att_decimal DECIMAL,
+ att_numeric NUMERIC,
+ att_bin bin(10),
+ CONSTRAINT table17_fk FOREIGN KEY(att_int)
+ REFERENCES table17(att_int));
+
+CREATE TABLE table17 (att_int INT(8) PRIMARY KEY);
+
+CREATE TABLE table18 (att_char CHAR(2) PRIMARY KEY);
+
+CREATE TABLE table19 (att_varchar VARCHAR(20) PRIMARY KEY);
+
+CREATE TABLE table20 (att_bit BIT PRIMARY KEY);
+
+CREATE TABLE table21 (att_tinyint TINYINT PRIMARY KEY);
+
+CREATE TABLE table22 (att_smallint SMALLINT(2) PRIMARY KEY);
+
+CREATE TABLE table23 (att_integer INTEGER(4) PRIMARY KEY);
+
+CREATE TABLE table24 (att_bigint BIGINT PRIMARY KEY);
+
+CREATE TABLE table25 (att_real REAL PRIMARY KEY);
+
+CREATE TABLE table26 (att_double DOUBLE PRIMARY KEY);
+
+CREATE TABLE table27 (att_float FLOAT PRIMARY KEY);
+
+CREATE TABLE table28 (att_decimal DECIMAL PRIMARY KEY);
+
+CREATE TABLE table29 (att_numeric NUMERIC PRIMARY KEY);
+
+CREATE TABLE table30 (att_binary bin(100) PRIMARY KEY);
+
diff --git a/db-4.8.30/test/scr038/data/no_primary_key_constraint.sql b/db-4.8.30/test/scr038/data/no_primary_key_constraint.sql
new file mode 100644
index 0000000..b2c6d30
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/no_primary_key_constraint.sql
@@ -0,0 +1,4 @@
+CREATE DATABASE numismatics; /*+ CACHESIZE = 16m */
+
+CREATE TABLE coin (cid INT(8),
+ unit VARCHAR(20));
diff --git a/db-4.8.30/test/scr038/data/nonexistent_column_constraints.sql b/db-4.8.30/test/scr038/data/nonexistent_column_constraints.sql
new file mode 100644
index 0000000..8bf001c
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/nonexistent_column_constraints.sql
@@ -0,0 +1,13 @@
+CREATE DATABASE numismatics;
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ value NUMERIC(8,2),
+ mintage_year INT(8),
+ mint_id INT(8),
+ CONSTRAINT mint_id_fk FOREIGN KEY(id)
+ REFERENCES mint(mid));
+
+CREATE TABLE mint (mid INT(8) PRIMARY KEY,
+ country VARCHAR(20),
+ city VARCHAR(20)); --+ DBTYPE = HASH
+
diff --git a/db-4.8.30/test/scr038/data/numeric_table.sql b/db-4.8.30/test/scr038/data/numeric_table.sql
new file mode 100644
index 0000000..aef7314
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/numeric_table.sql
@@ -0,0 +1,4 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_numeric NUMERIC PRIMARY KEY); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/only_database.sql b/db-4.8.30/test/scr038/data/only_database.sql
new file mode 100644
index 0000000..b5cc7f4
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/only_database.sql
@@ -0,0 +1 @@
+CREATE DATABASE numismatics;
diff --git a/db-4.8.30/test/scr038/data/only_index.sql b/db-4.8.30/test/scr038/data/only_index.sql
new file mode 100644
index 0000000..1d6a7c4
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/only_index.sql
@@ -0,0 +1,3 @@
+
+CREATE INDEX unit_index ON coin(unit);
+
diff --git a/db-4.8.30/test/scr038/data/only_table.sql b/db-4.8.30/test/scr038/data/only_table.sql
new file mode 100644
index 0000000..49ea66f
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/only_table.sql
@@ -0,0 +1,6 @@
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20));
+
+CREATE TABLE mint (mid INT(8),
+ country VARCHAR(20),
+ city VARCHAR(20)); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/real_table.sql b/db-4.8.30/test/scr038/data/real_table.sql
new file mode 100644
index 0000000..e542969
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/real_table.sql
@@ -0,0 +1,4 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_real REAL PRIMARY KEY); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/sample.sql b/db-4.8.30/test/scr038/data/sample.sql
new file mode 100644
index 0000000..62689c6
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/sample.sql
@@ -0,0 +1,20 @@
+CREATE DATABASE numismatics; /*+ CACHESIZE = 16m */
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR2(20),
+ value NUMERIC(8,2), -- just a comment here;
+ mintage_year INT(8),
+ mint_id INT(8),
+ CONSTRAINT mint_id_fk FOREIGN KEY(mint_id)
+ REFERENCES mint(mid));
+
+CREATE TABLE mint (mid INT(8) PRIMARY KEY,
+ country VARCHAR2(20),
+ city VARCHAR2(20)); --+ DBTYPE = HASH
+
+CREATE INDEX unit_index ON coin(unit);
+
+CREATE TABLE random (rid VARCHAR2(20) PRIMARY KEY,
+ chunk bin(127));
+
+/* SELECT * from mint;*/
diff --git a/db-4.8.30/test/scr038/data/smallint_table.sql b/db-4.8.30/test/scr038/data/smallint_table.sql
new file mode 100644
index 0000000..18b9a58
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/smallint_table.sql
@@ -0,0 +1,4 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_smallint SMALLINT(2) PRIMARY KEY); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/table_name_conflict_with_database.sql b/db-4.8.30/test/scr038/data/table_name_conflict_with_database.sql
new file mode 100644
index 0000000..c229f4f
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/table_name_conflict_with_database.sql
@@ -0,0 +1,3 @@
+CREATE DATABASE numismatics;
+CREATE TABLE numismatics(cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20));
diff --git a/db-4.8.30/test/scr038/data/table_without_name.sql b/db-4.8.30/test/scr038/data/table_without_name.sql
new file mode 100644
index 0000000..f662401
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/table_without_name.sql
@@ -0,0 +1,9 @@
+CREATE DATABASE numismatics; /*+ CACHESIZE = 16m */
+
+CREATE TABLE (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ value NUMERIC(8,2), -- just a comment here;
+ mintage_year INT(8),
+ mint_id INT(8),
+ CONSTRAINT mint_id_fk FOREIGN KEY(mint_id)
+ REFERENCES mint(mid));
diff --git a/db-4.8.30/test/scr038/data/tiny_table.sql b/db-4.8.30/test/scr038/data/tiny_table.sql
new file mode 100644
index 0000000..e29a1c9
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/tiny_table.sql
@@ -0,0 +1,4 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_tinyint TINYINT PRIMARY KEY); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/two_different_databases.sql b/db-4.8.30/test/scr038/data/two_different_databases.sql
new file mode 100644
index 0000000..c2be268
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/two_different_databases.sql
@@ -0,0 +1,10 @@
+CREATE DATABASE numismatics;
+CREATE DATABASE numismatics1;
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ value NUMERIC(8,2),
+ mintage_year INT(8),
+ mint_id INT(8),
+ CONSTRAINT mint_id_fk FOREIGN KEY(mint_id)
+ REFERENCES mint(mid));
diff --git a/db-4.8.30/test/scr038/data/two_different_indexes.sql b/db-4.8.30/test/scr038/data/two_different_indexes.sql
new file mode 100644
index 0000000..f029235
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/two_different_indexes.sql
@@ -0,0 +1,26 @@
+CREATE DATABASE numismatics; /*+ CACHESIZE = 16m */
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ value NUMERIC(8,2), -- just a comment here;
+ mintage_year INT(8),
+ mint_id INT(8),
+ CONSTRAINT mint_id_fk FOREIGN KEY(mint_id)
+ REFERENCES mint(mid));
+
+CREATE TABLE mint (mid INT(8) PRIMARY KEY,
+ country VARCHAR(20),
+ city VARCHAR(20),
+ zip_code INT(8)); --+ DBTYPE = HASH
+
+
+
+CREATE INDEX value_index ON coin(value);
+
+CREATE INDEX mint_id_index ON coin(mint_id);
+
+CREATE INDEX mintage_year_index ON coin(mintage_year);
+
+CREATE INDEX zip_code_index ON mint(zip_code);
+
+/* SELECT * from mint;*/
diff --git a/db-4.8.30/test/scr038/data/two_different_tables.sql b/db-4.8.30/test/scr038/data/two_different_tables.sql
new file mode 100644
index 0000000..7643057
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/two_different_tables.sql
@@ -0,0 +1,8 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20));
+
+CREATE TABLE mint (mid INT(8) PRIMARY KEY,
+ country VARCHAR(20),
+ city VARCHAR(20));
diff --git a/db-4.8.30/test/scr038/data/two_indexes_on_same_column.sql b/db-4.8.30/test/scr038/data/two_indexes_on_same_column.sql
new file mode 100644
index 0000000..46fa409
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/two_indexes_on_same_column.sql
@@ -0,0 +1,10 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_int INT(8) PRIMARY KEY,
+ att_char CHAR(2));
+
+CREATE INDEX int_index ON table1(att_int);
+
+CREATE INDEX int_index_1 ON table1(att_int);
+
+
diff --git a/db-4.8.30/test/scr038/data/two_same_columns.sql b/db-4.8.30/test/scr038/data/two_same_columns.sql
new file mode 100644
index 0000000..44f3612
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/two_same_columns.sql
@@ -0,0 +1,12 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ unit VARCHAR(20),
+ value NUMERIC(8,2),
+ mintage_year INT(8),
+ mint_id INT(8),
+ CONSTRAINT mint_id_fk FOREIGN KEY(mint_id)
+ REFERENCES mint(mid));
+
+
diff --git a/db-4.8.30/test/scr038/data/two_same_databases.sql b/db-4.8.30/test/scr038/data/two_same_databases.sql
new file mode 100644
index 0000000..549a9dd
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/two_same_databases.sql
@@ -0,0 +1,11 @@
+CREATE DATABASE numismatics;
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ value NUMERIC(8,2), -- just a comment here;
+ mintage_year INT(8),
+ mint_id INT(8),
+ CONSTRAINT mint_id_fk FOREIGN KEY(mint_id)
+ REFERENCES mint(mid));
diff --git a/db-4.8.30/test/scr038/data/two_same_indexes.sql b/db-4.8.30/test/scr038/data/two_same_indexes.sql
new file mode 100644
index 0000000..a5ddd64
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/two_same_indexes.sql
@@ -0,0 +1,11 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ value NUMERIC(8,2),
+ mintage_year INT(8));
+
+
+CREATE INDEX unit_index ON coin(unit);
+
+CREATE INDEX unit_index ON coin(unit); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/two_same_name_columns.sql b/db-4.8.30/test/scr038/data/two_same_name_columns.sql
new file mode 100644
index 0000000..e257552
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/two_same_name_columns.sql
@@ -0,0 +1,9 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ unit NUMERIC(8,2),
+ mintage_year INT(8),
+ mint_id INT(8),
+ CONSTRAINT mint_id_fk FOREIGN KEY(mint_id)
+ REFERENCES mint(mid));
diff --git a/db-4.8.30/test/scr038/data/two_same_name_columns_in_different_tables.sql b/db-4.8.30/test/scr038/data/two_same_name_columns_in_different_tables.sql
new file mode 100644
index 0000000..648b3bd
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/two_same_name_columns_in_different_tables.sql
@@ -0,0 +1,6 @@
+CREATE DATABASE numismatics;
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20));
+
+CREATE TABLE mint (mid INT(8) PRIMARY KEY,
+ unit VARCHAR(20));
diff --git a/db-4.8.30/test/scr038/data/two_same_name_indexes.sql b/db-4.8.30/test/scr038/data/two_same_name_indexes.sql
new file mode 100644
index 0000000..533bb94
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/two_same_name_indexes.sql
@@ -0,0 +1,11 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ value NUMERIC(8,2),
+ mintage_year INT(8));
+
+CREATE INDEX unit_index ON coin(unit);
+
+CREATE INDEX unit_index1 ON coin(unit);
+
diff --git a/db-4.8.30/test/scr038/data/two_same_name_tables.sql b/db-4.8.30/test/scr038/data/two_same_name_tables.sql
new file mode 100644
index 0000000..17c8063
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/two_same_name_tables.sql
@@ -0,0 +1,8 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20));
+
+CREATE TABLE coin (mid INT(8) PRIMARY KEY,
+ country VARCHAR(20),
+ city VARCHAR(20));
diff --git a/db-4.8.30/test/scr038/data/two_same_tables.sql b/db-4.8.30/test/scr038/data/two_same_tables.sql
new file mode 100644
index 0000000..7775ca1
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/two_same_tables.sql
@@ -0,0 +1,7 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20));
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20));
diff --git a/db-4.8.30/test/scr038/data/unsupported_access_method.sql b/db-4.8.30/test/scr038/data/unsupported_access_method.sql
new file mode 100644
index 0000000..203ed8a
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/unsupported_access_method.sql
@@ -0,0 +1,8 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE mint (mid INT(8) PRIMARY KEY,
+ country VARCHAR(20),
+ city VARCHAR(20)); --+ DBTYPE = QUEUE
+
+
+
diff --git a/db-4.8.30/test/scr038/data/unsupported_data_type.sql b/db-4.8.30/test/scr038/data/unsupported_data_type.sql
new file mode 100644
index 0000000..0e8f357
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/unsupported_data_type.sql
@@ -0,0 +1,4 @@
+CREATE DATABASE numismatics;
+
+CREATE TABLE coin (cid STRING PRIMARY KEY,
+ unit VARCHAR(20));
diff --git a/db-4.8.30/test/scr038/data/varchar_length.sql b/db-4.8.30/test/scr038/data/varchar_length.sql
new file mode 100644
index 0000000..2d9da9f
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/varchar_length.sql
@@ -0,0 +1,4 @@
+CREATE DATABASE numismatics; /*+ CACHESIZE = 16m */
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(2)); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/varchar_table.sql b/db-4.8.30/test/scr038/data/varchar_table.sql
new file mode 100644
index 0000000..eac0404
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/varchar_table.sql
@@ -0,0 +1,4 @@
+
+CREATE DATABASE numismatics;
+
+CREATE TABLE table1 (att_varchar VARCHAR(20) PRIMARY KEY); \ No newline at end of file
diff --git a/db-4.8.30/test/scr038/data/wrong_create_sequence.sql b/db-4.8.30/test/scr038/data/wrong_create_sequence.sql
new file mode 100644
index 0000000..c166460
--- /dev/null
+++ b/db-4.8.30/test/scr038/data/wrong_create_sequence.sql
@@ -0,0 +1,10 @@
+
+CREATE INDEX unit_index ON coin(unit);
+
+CREATE TABLE coin (cid INT(8) PRIMARY KEY,
+ unit VARCHAR(20),
+ value NUMERIC(8,2), -- just a comment here;
+ mintage_year INT(8),
+ mint_id INT(8));
+
+CREATE DATABASE numismatics;
diff --git a/db-4.8.30/test/scr038/nMakefile b/db-4.8.30/test/scr038/nMakefile
new file mode 100644
index 0000000..59926c8
--- /dev/null
+++ b/db-4.8.30/test/scr038/nMakefile
@@ -0,0 +1,14 @@
+# Makefile for building db_sql generated code on Windows.
+
+!include <win32.mak>
+
+bdb_dir = "../../build_windows"
+
+all: sample.exe
+
+.c.obj:
+ $(cc) $(cdebug) $(cflags) $(cvars) /I$(bdb_dir) $*.c
+
+sample.exe: sample.obj sample_verify.obj
+ $(link) $(ldebug) $(conflags) -out:sample.exe sample.obj sample_verify.obj $(conlibs) /LIBPATH:$(bdb_dir)/Win32/Debug libdb48d.lib
+
diff --git a/db-4.8.30/test/sdb001.tcl b/db-4.8.30/test/sdb001.tcl
new file mode 100644
index 0000000..6999770
--- /dev/null
+++ b/db-4.8.30/test/sdb001.tcl
@@ -0,0 +1,146 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb001 Tests mixing db and subdb operations
+# TEST Tests mixing db and subdb operations
+# TEST Create a db, add data, try to create a subdb.
+# TEST Test naming db and subdb with a leading - for correct parsing
+# TEST Existence check -- test use of -excl with subdbs
+# TEST
+# TEST Test non-subdb and subdb operations
+# TEST Test naming (filenames begin with -)
+# TEST Test existence (cannot create subdb of same name with -excl)
+proc sdb001 { method args } {
+ source ./include.tcl
+ global errorInfo
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb001: skipping for method $method"
+ return
+ }
+ puts "Subdb001: $method ($args) subdb and non-subdb tests"
+
+ set testfile $testdir/subdb001.db
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ set env NULL
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb001 skipping for env $env"
+ return
+ }
+ # Create the database and open the dictionary
+ set subdb subdb0
+ cleanup $testdir NULL
+ puts "\tSubdb001.a: Non-subdb database and subdb operations"
+ #
+ # Create a db with no subdbs. Add some data. Close. Try to
+ # open/add with a subdb. Should fail.
+ #
+ puts "\tSubdb001.a.0: Create db, add data, close, try subdb"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+ while { [gets $did str] != -1 && $count < 5 } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval \
+ {$db put} $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ error_check_good db_close [$db close] 0
+ set ret [catch {eval {berkdb_open_noerr -create -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_bad dbopen $ret 0
+ #
+ # Create a db with no subdbs. Add no data. Close. Try to
+ # open/add with a subdb. Should fail.
+ #
+ set testfile $testdir/subdb001a.db
+ puts "\tSubdb001.a.1: Create db, close, try subdb"
+ #
+ # !!!
+ # Using -truncate is illegal when opening for subdbs, but we
+ # can use it here because we are not using subdbs for this
+ # create.
+ #
+ set db [eval {berkdb_open -create -truncate -mode 0644} $args \
+ {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set ret [catch {eval {berkdb_open_noerr -create -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_bad dbopen $ret 0
+
+ if { [is_queue $method] == 1 || [is_partitioned $args]} {
+ puts "Subdb001: skipping remainder of test for method $method $args"
+ return
+ }
+
+ #
+ # Test naming, db and subdb names beginning with -.
+ #
+ puts "\tSubdb001.b: Naming"
+ set cwd [pwd]
+ cd $testdir
+ set testfile1 -subdb001.db
+ set subdb -subdb
+ puts "\tSubdb001.b.0: Create db and subdb with -name, no --"
+ set ret [catch {eval {berkdb_open -create -mode 0644} $args \
+ {$omethod $testfile1 $subdb}} db]
+ error_check_bad dbopen $ret 0
+ puts "\tSubdb001.b.1: Create db and subdb with -name, with --"
+ set db [eval {berkdb_open -create -mode 0644} $args \
+ {$omethod -- $testfile1 $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ cd $cwd
+
+ #
+ # Create 1 db with 1 subdb. Try to create another subdb of
+ # the same name. Should fail.
+ #
+
+ puts "\tSubdb001.c: Existence check"
+ set testfile $testdir/subdb001d.db
+ set subdb subdb
+ set ret [catch {eval {berkdb_open -create -excl -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [catch {eval {berkdb_open_noerr -create -excl -mode 0644} \
+ $args {$omethod $testfile $subdb}} db1]
+ error_check_bad dbopen $ret 0
+ error_check_good db_close [$db close] 0
+
+ return
+}
diff --git a/db-4.8.30/test/sdb002.tcl b/db-4.8.30/test/sdb002.tcl
new file mode 100644
index 0000000..95ea82a
--- /dev/null
+++ b/db-4.8.30/test/sdb002.tcl
@@ -0,0 +1,227 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb002
+# TEST Tests basic subdb functionality
+# TEST Small keys, small data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST Then repeat using an environment.
+proc sdb002 { method {nentries 10000} args } {
+ global passwd
+ global has_crypto
+
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ set env NULL
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb002 skipping for env $env"
+ return
+ }
+ set largs $args
+ subdb002_main $method $nentries $largs
+ append largs " -chksum "
+ subdb002_main $method $nentries $largs
+
+ # Skip remainder of test if release does not support encryption.
+ if { $has_crypto == 0 } {
+ return
+ }
+
+ append largs "-encryptaes $passwd "
+ subdb002_main $method $nentries $largs
+}
+
+proc subdb002_main { method nentries largs } {
+ source ./include.tcl
+ global encrypt
+
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+
+ env_cleanup $testdir
+
+ puts "Subdb002: $method ($largs) basic subdb tests"
+ set testfile $testdir/subdb002.db
+ subdb002_body $method $omethod $nentries $largs $testfile NULL
+
+ # Run convert_encrypt so that old_encrypt will be reset to
+ # the proper value and cleanup will work.
+ convert_encrypt $largs
+ set encargs ""
+ set largs [split_encargs $largs encargs]
+
+ cleanup $testdir NULL
+ if { [is_queue $omethod] == 1 } {
+ set sdb002_env berkdb_env_noerr
+ } else {
+ set sdb002_env berkdb_env
+ }
+ set env [eval {$sdb002_env -create -cachesize {0 10000000 0} \
+ -mode 0644} -home $testdir $encargs]
+ error_check_good env_open [is_valid_env $env] TRUE
+ puts "Subdb002: $method ($largs) basic subdb tests in an environment"
+
+ # We're in an env--use default path to database rather than specifying
+ # it explicitly.
+ set testfile subdb002.db
+ subdb002_body $method $omethod $nentries $largs $testfile $env
+ error_check_good env_close [$env close] 0
+}
+
+proc subdb002_body { method omethod nentries largs testfile env } {
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ # Create the database and open the dictionary
+ set subdb subdb0
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ if { [is_queue $omethod] == 1 } {
+ set sdb002_open berkdb_open_noerr
+ } else {
+ set sdb002_open berkdb_open
+ }
+
+ if { $env == "NULL" } {
+ set ret [catch {eval {$sdb002_open -create -mode 0644} $largs \
+ {$omethod $testfile $subdb}} db]
+ } else {
+ set ret [catch {eval {$sdb002_open -create -mode 0644} $largs \
+ {-env $env $omethod $testfile $subdb}} db]
+ }
+
+ #
+ # If -queue method, we need to make sure that trying to
+ # create a subdb fails.
+ if { [is_queue $method] == 1 } {
+ error_check_bad dbopen $ret 0
+ puts "Subdb002: skipping remainder of test for method $method"
+ return
+ }
+
+ error_check_good dbopen $ret 0
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb002_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc subdb002.check
+ }
+ puts "\tSubdb002.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval \
+ {$db put} $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tSubdb002.b: dump file"
+ set txn ""
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdb002.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next" $subdb
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdb002.d: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" $subdb
+
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdb002.e: db_dump with subdatabase"
+ set outfile $testdir/subdb002.dump
+ set dumpargs " -f $outfile -s $subdb "
+ if { $encrypt > 0 } {
+ append dumpargs " -P $passwd "
+ }
+ if { $env != "NULL" } {
+ append dumpargs " -h $testdir "
+ }
+ append dumpargs " $testfile"
+ set stat [catch {eval {exec $util_path/db_dump} $dumpargs} ret]
+ error_check_good dbdump.subdb $stat 0
+}
+
+# Check function for Subdb002; keys and data are identical
+proc subdb002.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdb002_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/db-4.8.30/test/sdb003.tcl b/db-4.8.30/test/sdb003.tcl
new file mode 100644
index 0000000..b89edc0
--- /dev/null
+++ b/db-4.8.30/test/sdb003.tcl
@@ -0,0 +1,186 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb003
+# TEST Tests many subdbs
+# TEST Creates many subdbs and puts a small amount of
+# TEST data in each (many defaults to 1000)
+# TEST
+# TEST Use the first 1000 entries from the dictionary as subdbnames.
+# TEST Insert each with entry as name of subdatabase and a partial list
+# TEST as key/data. After all are entered, retrieve all; compare output
+# TEST to original. Close file, reopen, do retrieve and re-verify.
+proc sdb003 { method {nentries 1000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb003: skipping for method $method"
+ return
+ }
+
+ puts "Subdb003: $method ($args) many subdb tests"
+
+ set txnenv 0
+ set rpcenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb003.db
+ set env NULL
+ } else {
+ set testfile subdb003.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set rpcenv [is_rpcenv $env]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $nentries == 1000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set fcount 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb003_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc subdb003.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set ndataent 10
+ set fdid [open $dict]
+ while { [gets $fdid str] != -1 && $fcount < $nentries } {
+ # Unlike the standard API, RPC doesn't support empty
+ # database names. [#15600]
+ if { $str == "" && $rpcenv == 1 } {
+ continue
+ }
+
+ set subdb $str
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $ndataent } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key \
+ [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ incr fcount
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $ndataent} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $ndataent $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next" $subdb
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" $subdb
+
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ if { [expr $fcount % 100] == 0 } {
+ puts -nonewline "$fcount "
+ flush stdout
+ }
+ }
+ close $fdid
+ puts ""
+}
+
+# Check function for Subdb003; keys and data are identical
+proc subdb003.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdb003_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/db-4.8.30/test/sdb004.tcl b/db-4.8.30/test/sdb004.tcl
new file mode 100644
index 0000000..0d36d42
--- /dev/null
+++ b/db-4.8.30/test/sdb004.tcl
@@ -0,0 +1,240 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb004
+# TEST Tests large subdb names
+# TEST subdb name = filecontents,
+# TEST key = filename, data = filecontents
+# TEST Put/get per key
+# TEST Dump file
+# TEST Dump subdbs, verify data and subdb name match
+# TEST
+# TEST Create 1 db with many large subdbs. Use the contents as subdb names.
+# TEST Take the source files and dbtest executable and enter their names as
+# TEST the key with their contents as data. After all are entered, retrieve
+# TEST all; compare output to original. Close file, reopen, do retrieve and
+# TEST re-verify.
+proc sdb004 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } {
+ puts "Subdb004: skipping for method $method"
+ return
+ }
+
+ puts "Subdb004: $method ($args) \
+ filecontents=subdbname filename=key filecontents=data pairs"
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb004.db
+ set env NULL
+ } else {
+ set testfile subdb004.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+ set pflags ""
+ set gflags ""
+ set txn ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb004_recno.check
+ append gflags "-recno"
+ } else {
+ set checkfunc subdb004.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ # Note that the subdatabase name is passed in as a char *, not
+ # in a DBT, so it may not contain nulls; use only source files.
+ set file_list [glob $src_root/*/*.c]
+ set fcount [llength $file_list]
+ if { $txnenv == 1 && $fcount > 100 } {
+ set file_list [lrange $file_list 0 99]
+ set fcount 100
+ }
+
+ set count 0
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $fcount} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ } else {
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ }
+ puts "\tSubdb004.a: Set/Check each subdb"
+ foreach f $file_list {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ set subdb $data
+ close $fid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid $data
+ }
+ close $fid
+
+ error_check_good Subdb004:diff($f,$t4) \
+ [filecmp $f $t4] 0
+
+ incr count
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ # puts "\tSubdb004.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ }
+
+ #
+ # Now for each file, check that the subdb name is the same
+ # as the data in that subdb and that the filename is the key.
+ #
+ puts "\tSubdb004.b: Compare subdb names with key/data"
+ set db [eval {berkdb_open -rdonly} $envargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+
+ for {set d [$c get -first] } { [llength $d] != 0 } \
+ {set d [$c get -next] } {
+ set subdbname [lindex [lindex $d 0] 0]
+ set subdb [eval {berkdb_open} $args {$testfile $subdbname}]
+		error_check_good dbopen [is_valid_db $subdb] TRUE
+
+ # Output the subdb name
+ set ofid [open $t3 w]
+ fconfigure $ofid -translation binary
+ if { [string compare "\0" \
+ [string range $subdbname end end]] == 0 } {
+ set slen [expr [string length $subdbname] - 2]
+ set subdbname [string range $subdbname 1 $slen]
+ }
+ puts -nonewline $ofid $subdbname
+ close $ofid
+
+ # Output the data
+ set subc [eval {$subdb cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $subc $subdb] TRUE
+ set d [$subc get -first]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+ set key [lindex [lindex $d 0] 0]
+ set data [lindex [lindex $d 0] 1]
+
+ set ofid [open $t1 w]
+ fconfigure $ofid -translation binary
+ puts -nonewline $ofid $data
+ close $ofid
+
+ $checkfunc $key $t1
+ $checkfunc $key $t3
+
+ error_check_good Subdb004:diff($t3,$t1) \
+ [filecmp $t3 $t1] 0
+ error_check_good curs_close [$subc close] 0
+ error_check_good db_close [$subdb close] 0
+ }
+ error_check_good curs_close [$c close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { [is_record_based $method] != 1 } {
+ fileremove $t2.tmp
+ }
+}
+
+# Check function for subdb004; key should be file name; data should be contents
+proc subdb004.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Subdb004:datamismatch($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
+proc subdb004_recno.check { binfile tmpfile } {
+ global names
+ source ./include.tcl
+
+	error_check_good key"$binfile"_exists [info exists names($binfile)] 1
+	set fname $names($binfile)
+ error_check_good Subdb004:datamismatch($fname,$tmpfile) \
+ [filecmp $fname $tmpfile] 0
+}
diff --git a/db-4.8.30/test/sdb005.tcl b/db-4.8.30/test/sdb005.tcl
new file mode 100644
index 0000000..2c9b8b3
--- /dev/null
+++ b/db-4.8.30/test/sdb005.tcl
@@ -0,0 +1,161 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb005
+# TEST Tests cursor operations in subdbs
+# TEST Put/get per key
+# TEST Verify cursor operations work within subdb
+# TEST Verify cursor operations do not work across subdbs
+# TEST
+#
+# We should test this on all btrees, all hash, and a combination thereof
+proc sdb005 {method {nentries 100} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb005: skipping for method $method"
+ return
+ }
+
+ puts "Subdb005: $method ( $args ) subdb cursor operations test"
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb005.db
+ set env NULL
+ } else {
+ set testfile subdb005.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ if { $nentries == 100 } {
+ set nentries 20
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ cleanup $testdir $env
+ set txn ""
+ set psize 8192
+ set duplist {-1 -1 -1 -1 -1}
+ build_all_subdb \
+ $testfile [list $method] $psize $duplist $nentries $args
+ set numdb [llength $duplist]
+ #
+ # Get a cursor in each subdb and move past the end of each
+ # subdb. Make sure we don't end up in another subdb.
+ #
+ puts "\tSubdb005.a: Cursor ops - first/prev and last/next"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for {set i 0} {$i < $numdb} {incr i} {
+ set db [eval {berkdb_open -unknown} $args {$testfile sub$i.db}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set db_handle($i) $db
+ # Used in 005.c test
+ lappend subdbnames sub$i.db
+
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # Get a second cursor for cursor comparison test.
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor2 [is_valid_cursor $dbc2 $db] TRUE
+
+ set d [$dbc get -first]
+ set d2 [$dbc2 get -first]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+
+ # Cursor comparison: both are on get -first.
+ error_check_good dbc2_cmp [$dbc cmp $dbc2] 0
+
+ # Used in 005.b test
+ set db_key($i) [lindex [lindex $d 0] 0]
+
+ set d [$dbc get -prev]
+ error_check_good dbc_get [expr [llength $d] == 0] 1
+
+ set d [$dbc get -last]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+
+ # Cursor comparison: the first cursor has moved to
+ # get -last.
+ error_check_bad dbc2_cmp [$dbc cmp $dbc2] 0
+
+ set d [$dbc get -next]
+ error_check_good dbc_get [expr [llength $d] == 0] 1
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good dbc2_close [$dbc2 close] 0
+ }
+ #
+ # Get a key from each subdb and try to get this key in a
+ # different subdb. Make sure it fails
+ #
+ puts "\tSubdb005.b: Get keys in different subdb's"
+ for {set i 0} {$i < $numdb} {incr i} {
+ set n [expr $i + 1]
+ if {$n == $numdb} {
+ set n 0
+ }
+ set db $db_handle($i)
+ if { [is_record_based $method] == 1 } {
+ set d [eval {$db get -recno} $txn {$db_key($n)}]
+ error_check_good \
+ db_get [expr [llength $d] == 0] 1
+ } else {
+ set d [eval {$db get} $txn {$db_key($n)}]
+ error_check_good db_get [expr [llength $d] == 0] 1
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ #
+ # Clean up
+ #
+ for {set i 0} {$i < $numdb} {incr i} {
+ error_check_good db_close [$db_handle($i) close] 0
+ }
+
+	puts "\tSubdb005.c: Check DB is read-only"
+	error_check_bad dbopen [catch \
+	    {berkdb_open_noerr -unknown $testfile} ret] 0
+
+	#
+	# Check contents of DB for subdb names only.  Makes sure that
+	# every subdbname is there and that nothing else is there.
+	#
+	puts "\tSubdb005.d: Check contents of DB for subdb names only"
+ set db [eval {berkdb_open -unknown -rdonly} $envargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set subdblist [$db get -glob *]
+ foreach kd $subdblist {
+ # subname also used in subdb005.e,f below
+ set subname [lindex $kd 0]
+ set i [lsearch $subdbnames $subname]
+ error_check_good subdb_search [expr $i != -1] 1
+ set subdbnames [lreplace $subdbnames $i $i]
+ }
+ error_check_good subdb_done [llength $subdbnames] 0
+
+ error_check_good db_close [$db close] 0
+ return
+}
diff --git a/db-4.8.30/test/sdb006.tcl b/db-4.8.30/test/sdb006.tcl
new file mode 100644
index 0000000..fae0045
--- /dev/null
+++ b/db-4.8.30/test/sdb006.tcl
@@ -0,0 +1,168 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb006
+# TEST Tests intra-subdb join
+# TEST
+# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
+# TEST everything else does as well. We'll create test databases called
+# TEST sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
+# TEST describes the duplication -- duplicates are of the form 0, N, 2N, 3N,
+# TEST ... where N is the number of the database. Primary.db is the primary
+# TEST database, and sub0.db is the database that has no matching duplicates.
+# TEST All of these are within a single database.
+#
+# We should test this on all btrees, all hash, and a combination thereof
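+#
+# For example, with the duplicate layout described above, sub2.db carries
+# duplicates 0, 2, 4, ... and sub3.db carries 0, 3, 6, ..., so a 2-way join
+# across sub2 and sub3 should match only the duplicates they share:
+# 0, 6, 12, ....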
+proc sdb006 {method {nentries 100} args } {
+ source ./include.tcl
+ global rand_init
+
+ # NB: these flags are internal only, ok
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] } {
+ puts "\tSubdb006 skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb006.db
+ set env NULL
+ } else {
+ set testfile subdb006.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $nentries == 100 } {
+ # !!!
+ # nentries must be greater than the number
+ # of do_join_subdb calls below.
+ #
+ set nentries 35
+ }
+ }
+ set testdir [get_home $env]
+ }
+ berkdb srand $rand_init
+
+ set oargs $args
+ foreach opt {" -dup" " -dupsort"} {
+ append args $opt
+
+ puts "Subdb006: $method ( $args ) Intra-subdb join"
+ set txn ""
+ #
+ # Get a cursor in each subdb and move past the end of each
+ # subdb. Make sure we don't end up in another subdb.
+ #
+ puts "\tSubdb006.a: Intra-subdb join"
+
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set psize 8192
+ set duplist {0 50 25 16 12}
+ set numdb [llength $duplist]
+ build_all_subdb $testfile [list $method] $psize \
+ $duplist $nentries $args
+
+ # Build the primary
+ puts "Subdb006: Building the primary database $method"
+ set oflags "-create -mode 0644 [conv $omethod \
+ [berkdb random_int 1 2]]"
+ set db [eval {berkdb_open} $oflags $oargs $testfile primary.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for { set i 0 } { $i < 1000 } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set key [format "%04d" $i]
+ set ret [eval {$db put} $txn {$key stub}]
+ error_check_good "primary put" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good "primary close" [$db close] 0
+ set did [open $dict]
+ gets $did str
+ do_join_subdb $testfile primary.db "1 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2 3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 4 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "0 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 0 1" $str $oargs
+
+ close $did
+ }
+}
diff --git a/db-4.8.30/test/sdb007.tcl b/db-4.8.30/test/sdb007.tcl
new file mode 100644
index 0000000..9d39738
--- /dev/null
+++ b/db-4.8.30/test/sdb007.tcl
@@ -0,0 +1,108 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb007
+# TEST Tests page size difference errors between subdbs.
+# TEST If the physical file already exists, we ignore pagesize specifications
+# TEST on any subsequent -creates.
+# TEST
+# TEST 1. Create/open a subdb with system default page size.
+# TEST Create/open a second subdb specifying a different page size.
+# TEST The create should succeed, but the pagesize of the new db
+# TEST will be the system default page size.
+# TEST 2. Create/open a subdb with a specified, non-default page size.
+# TEST Create/open a second subdb specifying a different page size.
+# TEST The create should succeed, but the pagesize of the new db
+# TEST will be the specified page size from the first create.
+
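+#
+# For example, if the file is first created with 8192-byte pages, a later
+# "berkdb_open -create -pagesize 512 ..." open of a second subdb is still
+# expected to succeed, but [stat_field $db stat "Page size"] should report
+# 8192; the checks below verify that behavior for both the default and an
+# explicitly specified first page size.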
+proc sdb007 { method args } {
+ source ./include.tcl
+
+ set db2args [convert_args -btree $args]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb007: skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Subdb007: skipping for specific page sizes"
+ return
+ }
+
+ puts "Subdb007: $method ($args) subdb tests with different page sizes"
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb007.db
+ set env NULL
+ } else {
+ set testfile subdb007.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ append db2args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set sub1 "sub1"
+ set sub2 "sub2"
+ cleanup $testdir $env
+ set txn ""
+
+ puts "\tSubdb007.a.0: create subdb with default page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args $envargs {$omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+
+ # Figure out what the default page size is so that we can send
+ # a different value to the next -create call.
+ set default_psize [stat_field $db stat "Page size"]
+ error_check_good dbclose [$db close] 0
+
+ if { $default_psize == 512 } {
+ set psize 2048
+ } else {
+ set psize 512
+ }
+
+ puts "\tSubdb007.a.1: Create 2nd subdb with different specified page size"
+ set db2 [eval {berkdb_open -create -btree} \
+ $db2args $envargs {-pagesize $psize $testfile $sub2}]
+ error_check_good db2_create [is_valid_db $db2] TRUE
+
+ set actual_psize [stat_field $db2 stat "Page size"]
+ error_check_good check_pagesize [expr $actual_psize == $default_psize] 1
+ error_check_good db2close [$db2 close] 0
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb007.b.0: Create subdb with specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args $envargs {-pagesize $psize $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb007.b.1: Create 2nd subdb with different specified page size"
+ set newpsize [expr $psize * 2]
+ set db2 [eval {berkdb_open -create -mode 0644} $args \
+ $envargs {-pagesize $newpsize $omethod $testfile $sub2}]
+ error_check_good subdb [is_valid_db $db2] TRUE
+ set actual_psize [stat_field $db2 stat "Page size"]
+ error_check_good check_pagesize [expr $actual_psize == $psize] 1
+ error_check_good db2close [$db2 close] 0
+}
diff --git a/db-4.8.30/test/sdb008.tcl b/db-4.8.30/test/sdb008.tcl
new file mode 100644
index 0000000..c4021b0
--- /dev/null
+++ b/db-4.8.30/test/sdb008.tcl
@@ -0,0 +1,93 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb008
+# TEST Tests explicit setting of lorders for subdatabases -- the
+# TEST lorder should be ignored.
+proc sdb008 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb008: skipping for method $method"
+ return
+ }
+ set eindex [lsearch -exact $args "-env"]
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile1 $testdir/subdb008a.db
+ set testfile2 $testdir/subdb008b.db
+ set env NULL
+ } else {
+ set testfile1 subdb008a.db
+ set testfile2 subdb008b.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "Subdb008: $method ($args) subdb tests with different lorders"
+
+ puts "\tSubdb008.a.0: create subdb with system default lorder"
+ set lorder "1234"
+ if { [big_endian] } {
+ set lorder "4321"
+ }
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile1 "sub1"}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ # Explicitly try to create subdb's of each byte order. In both
+ # cases the subdb should be forced to the byte order of the
+ # parent database.
+ puts "\tSubdb008.a.1: Try to create subdb with -1234 lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder 1234 $omethod $testfile1 "sub2"}]
+ error_check_good lorder_1234 [eval $db get_lorder] $lorder
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.a.2: Try to create subdb with -4321 lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder 4321 $omethod $testfile1 "sub3"}]
+ error_check_good lorder_4321 [eval $db get_lorder] $lorder
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.b.0: create subdb with non-default lorder"
+ set reverse_lorder "4321"
+ if { [big_endian] } {
+ set reverse_lorder "1234"
+ }
+ set db [eval {berkdb_open -create -mode 0644} \
+ {-lorder $reverse_lorder} $args {$omethod $testfile2 "sub1"}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.b.1: Try to create subdb with -1234 lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder 1234 $omethod $testfile2 "sub2"}]
+ error_check_good lorder_1234 [eval $db get_lorder] $reverse_lorder
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.b.2: Try to create subdb with -4321 lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder 4321 $omethod $testfile2 "sub3"}]
+ error_check_good lorder_4321 [eval $db get_lorder] $reverse_lorder
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+}
diff --git a/db-4.8.30/test/sdb009.tcl b/db-4.8.30/test/sdb009.tcl
new file mode 100644
index 0000000..b38a763
--- /dev/null
+++ b/db-4.8.30/test/sdb009.tcl
@@ -0,0 +1,107 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb009
+# TEST Test DB->rename() method for subdbs
+proc sdb009 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Subdb009: $method ($args): Test of DB->rename()"
+
+ if { [is_queue $method] == 1 } {
+ puts "\tSubdb009: Skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb009.db
+ set env NULL
+ } else {
+ set testfile subdb009.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set oldsdb OLDDB
+ set newsdb NEWDB
+
+ # Make sure we're starting from a clean slate.
+ cleanup $testdir $env
+ error_check_bad "$testfile exists" [file exists $testfile] 1
+
+ puts "\tSubdb009.a: Create/rename file"
+ puts "\t\tSubdb009.a.1: create"
+ set db [eval {berkdb_open -create -mode 0644}\
+ $omethod $args {$testfile $oldsdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # The nature of the key and data are unimportant; use numeric key
+ # so record-based methods don't need special treatment.
+ set txn ""
+ set key 1
+ set data [pad_data $method data]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db put} $txn {$key $data}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tSubdb009.a.2: rename"
+ error_check_good rename_file [eval {berkdb dbrename} $envargs \
+ {$testfile $oldsdb $newsdb}] 0
+
+ puts "\t\tSubdb009.a.3: check"
+ # Open again with create to make sure we've really completely
+ # disassociated the subdb from the old name.
+ set odb [eval {berkdb_open -create -mode 0644}\
+ $omethod $args $testfile $oldsdb]
+ error_check_good odb_open [is_valid_db $odb] TRUE
+ set odbt [$odb get $key]
+ error_check_good odb_close [$odb close] 0
+
+ set ndb [eval {berkdb_open -create -mode 0644}\
+ $omethod $args $testfile $newsdb]
+ error_check_good ndb_open [is_valid_db $ndb] TRUE
+ set ndbt [$ndb get $key]
+ error_check_good ndb_close [$ndb close] 0
+
+	# The data returned under the "old" name should be empty; the data
+	# returned under the "new" name should not.
+ error_check_good odbt_empty [llength $odbt] 0
+ error_check_bad ndbt_empty [llength $ndbt] 0
+ error_check_good ndbt [lindex [lindex $ndbt 0] 1] $data
+
+ # Now there's both an old and a new. Rename the "new" to the "old"
+ # and make sure that fails.
+ puts "\tSubdb009.b: Make sure rename fails instead of overwriting"
+ set ret [catch {eval {berkdb dbrename} $envargs $testfile \
+ $oldsdb $newsdb} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
+
+ puts "\tSubdb009 succeeded."
+}
diff --git a/db-4.8.30/test/sdb010.tcl b/db-4.8.30/test/sdb010.tcl
new file mode 100644
index 0000000..eb82ced
--- /dev/null
+++ b/db-4.8.30/test/sdb010.tcl
@@ -0,0 +1,169 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb010
+# TEST Test DB->remove() method and DB->truncate() for subdbs
+proc sdb010 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+	puts "Subdb010: Test of DB->remove() and DB->truncate()"
+
+ if { [is_queue $method] == 1 } {
+ puts "\tSubdb010: Skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are not given an env, create one.
+ if { $eindex == -1 } {
+ set env [berkdb_env -create -home $testdir -mode 0644]
+ error_check_good env_open [is_valid_env $env] TRUE
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set testfile subdb010.db
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ set tfpath $testdir/$testfile
+
+ cleanup $testdir $env
+
+ set txn ""
+ set testdb DATABASE
+ set testdb2 DATABASE2
+
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $envargs $testfile $testdb]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ puts "\tSubdb010.a: Test of DB->remove()"
+ error_check_good file_exists_before [file exists $tfpath] 1
+ error_check_good db_remove [eval {berkdb dbremove} $envargs \
+ $testfile $testdb] 0
+
+ # File should still exist.
+ error_check_good file_exists_after [file exists $tfpath] 1
+
+ # But database should not.
+ set ret [catch {eval berkdb_open $omethod \
+ $args $envargs $testfile $testdb} res]
+	error_check_bad open_failed $ret 0
+ error_check_good open_failed_ret [is_substr $errorCode ENOENT] 1
+
+ puts "\tSubdb010.b: Setup for DB->truncate()"
+ # The nature of the key and data are unimportant; use numeric key
+ # so record-based methods don't need special treatment.
+ set key1 1
+ set key2 2
+ set data1 [pad_data $method data1]
+ set data2 [pad_data $method data2]
+
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $envargs {$testfile $testdb}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db put} $txn {$key1 $data1}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set db2 [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $envargs $testfile $testdb2]
+ error_check_good db_open [is_valid_db $db2] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db2 put} $txn {$key2 $data2}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good db_close [$db2 close] 0
+
+ puts "\tSubdb010.c: truncate"
+ #
+ # Return value should be 1, the count of how many items were
+ # destroyed when we truncated.
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $envargs $testfile $testdb]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good trunc_subdb [eval {$db truncate} $txn] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tSubdb010.d: check"
+ set db [eval {berkdb_open} $args $envargs {$testfile $testdb}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set kd [$dbc get -first]
+ error_check_good trunc_dbcget [llength $kd] 0
+ error_check_good dbcclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set db2 [eval {berkdb_open} $args $envargs {$testfile $testdb2}]
+ error_check_good db_open [is_valid_db $db2] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db2 cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db2] TRUE
+ set kd [$dbc get -first]
+ error_check_bad notrunc_dbcget1 [llength $kd] 0
+ set db2kd [list [list $key2 $data2]]
+ error_check_good key2 $kd $db2kd
+ set kd [$dbc get -next]
+ error_check_good notrunc_dbget2 [llength $kd] 0
+ error_check_good dbcclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good db_close [$db2 close] 0
+
+ # If we created our env, close it.
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+ }
+}
diff --git a/db-4.8.30/test/sdb011.tcl b/db-4.8.30/test/sdb011.tcl
new file mode 100644
index 0000000..5ac55b3
--- /dev/null
+++ b/db-4.8.30/test/sdb011.tcl
@@ -0,0 +1,140 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb011
+# TEST Test deleting Subdbs with overflow pages
+# TEST Create 1 db with many large subdbs.
+# TEST Test subdatabases with overflow pages.
+proc sdb011 { method {ndups 13} {nsubdbs 10} args} {
+ global names
+ source ./include.tcl
+ global rand_init
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } {
+ puts "Subdb011: skipping for method $method"
+ return
+ }
+ set txnenv 0
+ set envargs ""
+ set max_files 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb011.db
+ set env NULL
+ set tfpath $testfile
+ } else {
+ set testfile subdb011.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ set max_files 50
+ if { $ndups == 13 } {
+ set ndups 7
+ }
+ }
+ set testdir [get_home $env]
+ set tfpath $testdir/$testfile
+ }
+
+ # Create the database and open the dictionary
+
+ cleanup $testdir $env
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+ set flen [llength $file_list]
+ puts "Subdb011: $method ($args) $ndups overflow dups with \
+ $flen filename=key filecontents=data pairs"
+
+ puts "\tSubdb011.a: Create each of $nsubdbs subdbs and dups"
+ set slist {}
+ set i 0
+ set count 0
+ foreach f $file_list {
+ set i [expr $i % $nsubdbs]
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ set subdb subdb$i
+ lappend slist $subdb
+ close $fid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for {set dup 0} {$dup < $ndups} {incr dup} {
+ set data $dup:$filecont
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key \
+ [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good dbclose [$db close] 0
+ incr i
+ incr count
+ }
+
+ puts "\tSubdb011.b: Verify overflow pages"
+ foreach subdb $slist {
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set stat [$db stat]
+
+ # What everyone else calls overflow pages, hash calls "big
+ # pages", so we need to special-case hash here. (Hash
+ # overflow pages are additional pages after the first in a
+ # bucket.)
+ if { [string compare [$db get_type] hash] == 0 } {
+ error_check_bad overflow \
+ [is_substr $stat "{{Number of big pages} 0}"] 1
+ } else {
+ error_check_bad overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ error_check_good dbclose [$db close] 0
+ }
+
+ puts "\tSubdb011.c: Delete subdatabases"
+ for {set i $nsubdbs} {$i > 0} {set i [expr $i - 1]} {
+ #
+ # Randomly delete a subdatabase
+ set sindex [berkdb random_int 0 [expr $i - 1]]
+ set subdb [lindex $slist $sindex]
+ #
+ # Delete the one we did from the list
+ set slist [lreplace $slist $sindex $sindex]
+ error_check_good file_exists_before [file exists $tfpath] 1
+ error_check_good db_remove [eval {berkdb dbremove} $envargs \
+ {$testfile $subdb}] 0
+ }
+}
+
diff --git a/db-4.8.30/test/sdb012.tcl b/db-4.8.30/test/sdb012.tcl
new file mode 100644
index 0000000..3cd4000
--- /dev/null
+++ b/db-4.8.30/test/sdb012.tcl
@@ -0,0 +1,434 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb012
+# TEST Test subdbs with locking and transactions
+# TEST Tests that creating and removing subdbs while handles
+# TEST are open works correctly, both with and without txns.
+#
+proc sdb012 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb012: skipping for method $method"
+ return
+ }
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb012 skipping for env $env"
+ return
+ }
+ set encargs ""
+ set largs [split_encargs $args encargs]
+
+ puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests"
+
+ #
+ # sdb012_body takes a txn list containing 4 elements.
+ # {txn command for first subdb
+ # txn command for second subdb
+ # txn command for first subdb removal
+ # txn command for second subdb removal}
+ #
+	# The allowed commands are 'none', 'one', 'auto', 'abort', 'commit'.
+ # 'none' is a special case meaning run without a txn. In the
+ # case where all 4 items are 'none', we run in a lock-only env.
+ # 'one' is a special case meaning we create the subdbs together
+ # in one single transaction. It is indicated as the value for t1,
+ # and the value in t2 indicates if that single txn should be
+ # aborted or committed. It is not used and has no meaning
+ # in the removal case. 'auto' means use the -auto_commit flag
+ # to the operation, and 'abort' and 'commit' do the obvious.
+ # "-auto" is applied only to the creation of the subdbs, since
+ # it is done by default on database removes in transactional
+ # environments.
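+	#
+	# For example, a tlist of {one commit abort commit} means: create
+	# both subdbs in a single txn and commit it, then remove the first
+	# subdb in a txn that is aborted and the second subdb in a txn
+	# that is committed.  A tlist of {none none none none} runs the
+	# whole sequence without txns in a lock-only env.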
+ #
+ # First test locking w/o txns. If any in tlist are 'none',
+ # all must be none.
+ #
+ # Now run through the txn-based operations
+ set count 0
+ set sdb "Subdb012."
+ set teststr "abcdefghijklmnopqrstuvwxyz"
+ set testlet [split $teststr {}]
+ foreach t1 { none one abort auto commit } {
+ foreach t2 { none abort auto commit } {
+ if { $t1 == "one" } {
+ if { $t2 == "none" || $t2 == "auto"} {
+ continue
+ }
+ }
+ set tlet [lindex $testlet $count]
+ foreach r1 { none abort commit } {
+ foreach r2 { none abort commit } {
+ set tlist [list $t1 $t2 $r1 $r2]
+ set nnone [llength \
+ [lsearch -all $tlist none]]
+ if { $nnone != 0 && $nnone != 4 } {
+ continue
+ }
+ sdb012_body $testdir $omethod $largs \
+ $encargs $sdb$tlet $tlist
+ }
+ }
+ incr count
+ }
+ }
+
+}
+
+proc s012 { method args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+
+ set encargs ""
+ set largs ""
+
+ puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests"
+
+ set sdb "Subdb012."
+ set tlet X
+ set tlist $args
+ error_check_good tlist [llength $tlist] 4
+ sdb012_body $testdir $omethod $largs $encargs $sdb$tlet $tlist
+}
+
+#
+# This proc checks the tlist values and returns the flags
+# that should be used when opening the env. If we are running
+# with no txns, then just -lock, otherwise -txn.
+#
+proc sdb012_subsys { tlist } {
+ set t1 [lindex $tlist 0]
+ #
+ # If we have no txns, all elements of the list should be none.
+ # In that case we only run with locking turned on.
+ # Otherwise, we use the full txn subsystems.
+ #
+ set allnone {none none none none}
+ if { $allnone == $tlist } {
+ set subsys "-lock"
+ } else {
+ set subsys "-txn"
+ }
+ return $subsys
+}
+
+#
+# This proc parses the tlist and returns a list of 4 items that
+# should be used in operations. I.e. it will begin the txns as
+# needed, or return a -auto_commit flag, etc.
+#
+proc sdb012_tflags { env tlist } {
+ set ret ""
+ set t1 ""
+ foreach t $tlist {
+ switch $t {
+ one {
+ set t1 [$env txn]
+ error_check_good txnbegin [is_valid_txn $t1 $env] TRUE
+ lappend ret "-txn $t1"
+ lappend ret "-txn $t1"
+ }
+ auto {
+ lappend ret "-auto_commit"
+ }
+ abort -
+ commit {
+ #
+ # If the previous command was a "one", skip over
+ # this commit/abort. Otherwise start a new txn
+ # for the removal case.
+ #
+ if { $t1 == "" } {
+ set txn [$env txn]
+ error_check_good txnbegin [is_valid_txn $txn \
+ $env] TRUE
+ lappend ret "-txn $txn"
+ } else {
+ set t1 ""
+ }
+ }
+ none {
+ lappend ret ""
+ }
+ default {
+ error "Txn command $t not implemented"
+ }
+ }
+ }
+ return $ret
+}
+
+#
+# This proc parses the tlist and returns a list of 4 items that
+# should be used in the txn conclusion operations.  I.e. it will
+# give a no-op (sdb012_nop) if using auto_commit (i.e. no final
+# txn op), or a single abort/commit if both subdb's are in one txn.
+#
+proc sdb012_top { tflags tlist } {
+ set ret ""
+ set t1 ""
+ #
+ # We know both lists have 4 items. Iterate over them
+ # using multiple value lists so we know which txn goes
+ # with each op.
+ #
+ # The tflags list is needed to extract the txn command
+ # out for the operation. The tlist list is needed to
+ # determine what operation we are doing.
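+	# (Tcl's multi-list foreach below walks tlist and tflags in
+	# lockstep, pairing element 0 with element 0, 1 with 1, etc.)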
+ #
+ foreach t $tlist tf $tflags {
+ switch $t {
+ one {
+ set t1 [lindex $tf 1]
+ }
+ auto {
+ lappend ret "sdb012_nop"
+ }
+ abort -
+ commit {
+ #
+				# If the previous command was a "one" (i.e. t1
+				# is set), append a no-op for that slot and
+				# then the single command resolving both.
+ #
+ if { $t1 == "" } {
+ set txn [lindex $tf 1]
+ set top "$txn $t"
+ lappend ret $top
+ } else {
+ set top "$t1 $t"
+ lappend ret "sdb012_nop"
+ lappend ret $top
+ set t1 ""
+ }
+ }
+ none {
+ lappend ret "sdb012_nop"
+ }
+ }
+ }
+ return $ret
+}
+
+proc sdb012_nop { } {
+ return 0
+}
+
+proc sdb012_isabort { tlist item } {
+ set i [lindex $tlist $item]
+ if { $i == "one" } {
+ set i [lindex $tlist [expr $item + 1]]
+ }
+ if { $i == "abort" } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc sdb012_body { testdir omethod largs encargs msg tlist } {
+
+ puts "\t$msg: $tlist"
+ set testfile subdb012.db
+ set subdb1 sub1
+ set subdb2 sub2
+
+ set subsys [sdb012_subsys $tlist]
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -home} $testdir $subsys $encargs]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good test_lock [$env test abort subdb_lock] 0
+
+ #
+ # Convert from our tlist txn commands into real flags we
+ # will pass to commands. Use the multiple values feature
+ # of foreach to do this efficiently.
+ #
+ set tflags [sdb012_tflags $env $tlist]
+ foreach {txn1 txn2 rem1 rem2} $tflags {break}
+ foreach {top1 top2 rop1 rop2} [sdb012_top $tflags $tlist] {break}
+
+# puts "txn1 $txn1, txn2 $txn2, rem1 $rem1, rem2 $rem2"
+# puts "top1 $top1, top2 $top2, rop1 $rop1, rop2 $rop2"
+ puts "\t$msg.0: Create sub databases in env with $subsys"
+ set s1 [eval {berkdb_open -env $env -create -mode 0644} \
+ $largs $txn1 {$omethod $testfile $subdb1}]
+ error_check_good dbopen [is_valid_db $s1] TRUE
+
+ set ret [eval $top1]
+ error_check_good t1_end $ret 0
+
+ set s2 [eval {berkdb_open -env $env -create -mode 0644} \
+ $largs $txn2 {$omethod $testfile $subdb2}]
+ error_check_good dbopen [is_valid_db $s2] TRUE
+
+ puts "\t$msg.1: Subdbs are open; resolve txns if necessary"
+ set ret [eval $top2]
+ error_check_good t2_end $ret 0
+
+ set t1_isabort [sdb012_isabort $tlist 0]
+ set t2_isabort [sdb012_isabort $tlist 1]
+ set r1_isabort [sdb012_isabort $tlist 2]
+ set r2_isabort [sdb012_isabort $tlist 3]
+
+# puts "t1_isabort $t1_isabort, t2_isabort $t2_isabort, r1_isabort $r1_isabort, r2_isabort $r2_isabort"
+
+ puts "\t$msg.2: Subdbs are open; verify removal failures"
+ # Verify removes of subdbs with open subdb's fail
+ #
+ # We should fail no matter what. If we aborted, then the
+ # subdb should not exist. If we didn't abort, we should fail
+ # with DB_LOCK_NOTGRANTED.
+ #
+ # XXX - Do we need -auto_commit for all these failing ones?
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ error_check_bad dbremove1_open $r 0
+ if { $t1_isabort } {
+ error_check_good dbremove1_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ]
+ error_check_bad dbremove2_open $r 0
+ if { $t2_isabort } {
+ error_check_good dbremove2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ # Verify file remove fails
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ error_check_bad dbremovef_open $r 0
+
+ #
+ # If both aborted, there should be no file??
+ #
+ if { $t1_isabort && $t2_isabort } {
+ error_check_good dbremovef_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremovef_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ puts "\t$msg.3: Close subdb2; verify removals"
+ error_check_good close_s2 [$s2 close] 0
+ set r [ catch {eval {berkdb dbremove -env} \
+ $env $rem2 $testfile $subdb2} result ]
+ if { $t2_isabort } {
+ error_check_bad dbrem2_ab $r 0
+ error_check_good dbrem2_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbrem2 $result 0
+ }
+ # Resolve subdb2 removal txn
+ set r [eval $rop2]
+ error_check_good rop2 $r 0
+
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ error_check_bad dbremove1.2_open $r 0
+ if { $t1_isabort } {
+ error_check_good dbremove1.2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1.2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ # There are three cases here:
+ # 1. if both t1 and t2 aborted, the file shouldn't exist
+ # 2. if only t1 aborted, the file still exists and nothing is open
+ # 3. if neither aborted a remove should fail because the first
+ # subdb is still open
+ # In case 2, don't try the remove, because it should succeed
+ # and we won't be able to test anything else.
+ if { !$t1_isabort || $t2_isabort } {
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ if { $t1_isabort && $t2_isabort } {
+ error_check_bad dbremovef.2_open $r 0
+ error_check_good dbremove.2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_bad dbremovef.2_open $r 0
+ error_check_good dbremove.2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+ }
+
+ puts "\t$msg.4: Close subdb1; verify removals"
+ error_check_good close_s1 [$s1 close] 0
+ set r [ catch {eval {berkdb dbremove -env} \
+ $env $rem1 $testfile $subdb1} result ]
+ if { $t1_isabort } {
+ error_check_bad dbremove1_ab $r 0
+ error_check_good dbremove1_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1 $result 0
+ }
+ # Resolve subdb1 removal txn
+ set r [eval $rop1]
+ error_check_good rop1 $r 0
+
+ # Verify removal of subdb2. All DB handles are closed now.
+ # So we have two scenarios:
+ # 1. The removal of subdb2 above was successful and subdb2
+ # doesn't exist and we should fail that way.
+ # 2. The removal of subdb2 above was aborted, and this
+ # removal should succeed.
+ #
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ]
+ if { $r2_isabort && !$t2_isabort } {
+ error_check_good dbremove2.1_ab $result 0
+ } else {
+ error_check_bad dbremove2.1 $r 0
+ error_check_good dbremove2.1 [is_substr \
+ $result "no such file"] 1
+ }
+
+ # Verify removal of subdb1. All DB handles are closed now.
+ # So we have two scenarios:
+ # 1. The removal of subdb1 above was successful and subdb1
+ # doesn't exist and we should fail that way.
+ # 2. The removal of subdb1 above was aborted, and this
+ # removal should succeed.
+ #
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ if { $r1_isabort && !$t1_isabort } {
+ error_check_good dbremove1.1 $result 0
+ } else {
+ error_check_bad dbremove_open $r 0
+ error_check_good dbremove.1 [is_substr \
+ $result "no such file"] 1
+ }
+
+ puts "\t$msg.5: All closed; remove file"
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ if { $t1_isabort && $t2_isabort } {
+ error_check_bad dbremove_final_ab $r 0
+ error_check_good dbremove_file_abstr [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove_final $r 0
+ }
+ error_check_good envclose [$env close] 0
+}
diff --git a/db-4.8.30/test/sdb013.tcl b/db-4.8.30/test/sdb013.tcl
new file mode 100644
index 0000000..1f1ee89
--- /dev/null
+++ b/db-4.8.30/test/sdb013.tcl
@@ -0,0 +1,179 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb013
+# TEST Tests in-memory subdatabases.
+# TEST Create an in-memory subdb. Test for persistence after
+# TEST overflowing the cache. Test for conflicts when we have
+# TEST two in-memory files.
+
+proc sdb013 { method { nentries 10 } args } {
+ source ./include.tcl
+
+ set tnum "013"
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queueext $method] == 1 } {
+ puts "Subdb$tnum: skipping for method $method"
+ return
+ }
+
+ puts "Subdb$tnum: $method ($args) in-memory subdb tests"
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ set env NULL
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb$tnum skipping for env $env"
+ return
+ }
+
+ # In-memory dbs never go to disk, so we can't do checksumming.
+ # If the test module sent in the -chksum arg, get rid of it.
+ set chkindex [lsearch -exact $args "-chksum"]
+ if { $chkindex != -1 } {
+ set args [lreplace $args $chkindex $chkindex]
+ }
+
+ # Create the env, with a very small cache that we can easily
+ # fill. If a particularly large page size is specified, make
+ # the cache a little larger, but still on the small side.
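+	# The -cachesize value used below is Berkeley DB's
+	# {gbytes bytes ncache} form; e.g. {0 65536 1} is a single
+	# 64KB cache region.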
+ env_cleanup $testdir
+ set csize {0 65536 1}
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ incr pgindex
+ set pagesize [lindex $args $pgindex]
+ if { $pagesize > 8192 } {
+ set cache [expr 4 * $pagesize]
+ set csize "0 $cache 1"
+ }
+ }
+
+ set env [berkdb_env_noerr -create -cachesize $csize -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # Set filename to NULL; this causes the creation of an in-memory
+ # subdb.
+ set testfile ""
+ set subdb subdb0
+
+ puts "\tSubdb$tnum.a: Create in-mem subdb, add data, close."
+ set sdb [eval {berkdb_open_noerr -create -mode 0644} \
+ $args -env $env -auto_commit {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $sdb] TRUE
+
+ set ret [sdb013_populate $sdb $method $nentries]
+ error_check_good populate $ret 0
+ error_check_good sdb_close [$sdb close] 0
+
+ # Do a bunch of writing to evict all pages from the memory pool.
+ puts "\tSubdb$tnum.b: Create another db, overflow the cache."
+ set dummyfile foo.db
+ set db [eval {berkdb_open_noerr -create -mode 0644} $args -env $env\
+ -auto_commit $omethod $dummyfile]
+ error_check_good dummy_open [is_valid_db $db] TRUE
+
+ set entries [expr $nentries * 100]
+ set ret [sdb013_populate $db $method $entries]
+ error_check_good dummy_close [$db close] 0
+
+ # Make sure we can still open the in-memory subdb.
+ puts "\tSubdb$tnum.c: Check we can still open the in-mem subdb."
+ set sdb [eval {berkdb_open_noerr} \
+ $args -env $env -auto_commit {$omethod $testfile $subdb}]
+ error_check_good sdb_reopen [is_valid_db $sdb] TRUE
+ error_check_good sdb_close [$sdb close] 0
+
+ # Exercise the -m (dump in-memory) option on db_dump.
+ puts "\tSubdb$tnum.d: Exercise in-memory db_dump."
+ set stat \
+ [catch {eval {exec $util_path/db_dump} -h $testdir -m $subdb} res]
+ error_check_good dump_successful $stat 0
+
+ puts "\tSubdb$tnum.e: Remove in-mem subdb."
+ error_check_good \
+ sdb_remove [berkdb dbremove -env $env $testfile $subdb] 0
+
+ puts "\tSubdb$tnum.f: Check we cannot open the in-mem subdb."
+ set ret [catch {eval {berkdb_open_noerr} -env $env $args \
+ -auto_commit {$omethod $testfile $subdb}} db]
+ error_check_bad dbopen $ret 0
+
+ foreach end { commit abort } {
+ # Create an in-memory database.
+ puts "\tSubdb$tnum.g: Create in-mem subdb, add data, close."
+ set sdb [eval {berkdb_open_noerr -create -mode 0644} \
+ $args -env $env -auto_commit {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $sdb] TRUE
+
+ set ret [sdb013_populate $sdb $method $nentries]
+ error_check_good populate $ret 0
+ error_check_good sdb_close [$sdb close] 0
+
+ # Transactionally remove the database.
+ puts "\tSubdb$tnum.h: Transactionally remove in-mem database."
+ set txn [$env txn]
+ error_check_good db_remove \
+ [berkdb dbremove -env $env -txn $txn $testfile $subdb] 0
+
+ # Write a cacheful of data.
+ puts "\tSubdb$tnum.i: Create another db, overflow the cache."
+ set db [eval {berkdb_open_noerr -create -mode 0644} $args \
+ -env $env -auto_commit $omethod $dummyfile]
+ error_check_good dummy_open [is_valid_db $db] TRUE
+
+ set entries [expr $nentries * 100]
+ set ret [sdb013_populate $db $method $entries]
+ error_check_good dummy_close [$db close] 0
+
+ # Finish the txn and make sure the database is either
+ # gone (if committed) or still there (if aborted).
+ error_check_good txn_$end [$txn $end] 0
+ if { $end == "abort" } {
+ puts "\tSubdb$tnum.j: Check that database still exists."
+ set sdb [eval {berkdb_open_noerr} $args \
+ -env $env -auto_commit {$omethod $testfile $subdb}]
+ error_check_good sdb_reopen [is_valid_db $sdb] TRUE
+ error_check_good sdb_close [$sdb close] 0
+ } else {
+ puts "\tSubdb$tnum.j: Check that database is gone."
+ set ret [catch {eval {berkdb_open_noerr} -env $env \
+ $args -auto_commit {$omethod $testfile $subdb}} res]
+ error_check_bad dbopen $ret 0
+ }
+ }
+
+ error_check_good env_close [$env close] 0
+}
+
+proc sdb013_populate { db method nentries } {
+ source ./include.tcl
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ set r [ catch {$db put $key [chop_data $method $str]} ret ]
+ if { $r != 0 } {
+ close $did
+ return $ret
+ }
+
+ incr count
+ }
+ close $did
+ return 0
+}
+
diff --git a/db-4.8.30/test/sdb014.tcl b/db-4.8.30/test/sdb014.tcl
new file mode 100644
index 0000000..be5ded4
--- /dev/null
+++ b/db-4.8.30/test/sdb014.tcl
@@ -0,0 +1,112 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb014
+# TEST Tests mixing in-memory named and in-memory unnamed dbs.
+# TEST Create a regular in-memory db, add data.
+# TEST Create a named in-memory db.
+# TEST Try to create the same named in-memory db again (should fail).
+# TEST Try to create a different named in-memory db (should succeed).
+# TEST
+proc sdb014 { method args } {
+ source ./include.tcl
+
+ set tnum "014"
+ set orig_tdir $testdir
+ if { [is_queueext $method] == 1 } {
+ puts "Subdb$tnum: skipping for method $method"
+ return
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # In-memory dbs never go to disk, so we can't do checksumming.
+ # If the test module sent in the -chksum arg, get rid of it.
+ set chkindex [lsearch -exact $args "-chksum"]
+ if { $chkindex != -1 } {
+ set args [lreplace $args $chkindex $chkindex]
+ }
+
+ puts "Subdb$tnum ($method $args):\
+ In-memory named dbs with regular in-mem dbs."
+
+ # If we are given an env, use it. Otherwise, open one.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ env_cleanup $testdir
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set envflags [$env get_open_flags]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "\tSubdb$tnum.a: Create and populate in-memory unnamed database."
+ set testfile ""
+ set db [eval {berkdb_open -env $env -create -mode 0644} \
+ $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+ while { [gets $did str] != -1 && $count < 5 } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval \
+ {$db put} $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ error_check_good db_close [$db close] 0
+
+ # Create named in-memory db. Try to create a second in-memory db of
+ # the same name. Should fail.
+ puts "\tSubdb$tnum.b: Create in-memory named database."
+ set subdb "SUBDB"
+ set db [eval {berkdb_open -env $env -create -excl -mode 0644} \
+ $args $omethod {$testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tSubdb$tnum.c: Try to create second inmem database."
+ set ret [catch {eval {berkdb_open_noerr -env $env -create -excl \
+ -mode 0644} $args {$omethod $testfile $subdb}} db1]
+ error_check_bad dbopen $ret 0
+
+ # Clean up. Close the env if this test created it.
+ error_check_good db_close [$db close] 0
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+ }
+
+ set testdir $orig_tdir
+ return
+}
+
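The naming convention sdb014 relies on, as a hedged sketch: an empty file name plus a subdatabase name yields a named in-memory database, and -excl makes a second create of the same name fail. $testdir comes from include.tcl in the harness; the database name below is arbitrary:

set env [berkdb_env -create -home $testdir]
set db [berkdb_open -create -excl -env $env -btree "" "SUBDB"]
error_check_good first_create [is_valid_db $db] TRUE
# A second exclusive create of the same name should fail.
set rc [catch {berkdb_open_noerr -create -excl -env $env -btree "" "SUBDB"} err]
error_check_bad second_create $rc 0
$db close
$env close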
diff --git a/db-4.8.30/test/sdb015.tcl b/db-4.8.30/test/sdb015.tcl
new file mode 100644
index 0000000..2fa8f27
--- /dev/null
+++ b/db-4.8.30/test/sdb015.tcl
@@ -0,0 +1,117 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb015
+# TEST Tests basic in-memory named database functionality
+# TEST Small keys, small data
+# TEST Put/get per key
+# TEST
+# TEST Use the first nentries (default 1,000) entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each after insertion.
+# TEST Repeat the test with encryption if the release supports it.
+proc sdb015 { method {nentries 1000} args } {
+ global passwd
+ global has_crypto
+
+ if { [is_queueext $method] == 1 } {
+ puts "Subdb015: skipping for method $method"
+ return
+ }
+
+ # Skip test if given an env - this test needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb015 skipping for env $env"
+ return
+ }
+
+ # In-memory dbs never go to disk, so we can't do checksumming.
+ # If the test module sent in the -chksum arg, get rid of it.
+ set chkindex [lsearch -exact $args "-chksum"]
+ if { $chkindex != -1 } {
+ set args [lreplace $args $chkindex $chkindex]
+ }
+
+ set largs $args
+ subdb015_main $method $nentries $largs
+
+ # Skip remainder of test if release does not support encryption.
+ if { $has_crypto == 0 } {
+ return
+ }
+
+ append largs " -encryptaes $passwd "
+ subdb015_main $method $nentries $largs
+}
+
+proc subdb015_main { method nentries largs } {
+ source ./include.tcl
+ global encrypt
+
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+
+ env_cleanup $testdir
+
+ # Run convert_encrypt so that old_encrypt will be reset to
+ # the proper value and cleanup will work.
+ convert_encrypt $largs
+ set encargs ""
+ set largs [split_encargs $largs encargs]
+
+ set env [eval {berkdb_env -create -cachesize {0 10000000 0} \
+ -mode 0644} -home $testdir $encargs]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ puts "Subdb015: $method ($largs) basic in-memory named db tests."
+ subdb015_body $method $omethod $nentries $largs $env
+ error_check_good env_close [$env close] 0
+}
+
+proc subdb015_body { method omethod nentries largs env } {
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ # Create the database and open the dictionary
+ set subdb subdb0
+ set db [eval {berkdb_open -create -mode 0644} $largs \
+ {-env $env $omethod "" $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set pflags ""
+ set gflags ""
+ set count 0
+
+ puts "\tSubdb015.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval \
+ {$db put} $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ error_check_good db_close [$db close] 0
+}
+
diff --git a/db-4.8.30/test/sdb016.tcl b/db-4.8.30/test/sdb016.tcl
new file mode 100644
index 0000000..4df96d0
--- /dev/null
+++ b/db-4.8.30/test/sdb016.tcl
@@ -0,0 +1,98 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb016
+# TEST Creates many in-memory named dbs and puts a small amount of
+# TEST data in each (many defaults to 100)
+# TEST
+# TEST Use the first 100 entries from the dictionary as names.
+# TEST Insert each with the entry as the name of a subdatabase and a small
+# TEST set of key/data pairs; retrieve each pair after insertion and compare
+# TEST to the original.
+proc sdb016 { method {nentries 100} args } {
+ source ./include.tcl
+
+ set tnum "016"
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queueext $method] == 1 } {
+ puts "Subdb$tnum skipping for method $method"
+ return
+ }
+
+ puts "Subdb$tnum: $method ($args) many in-memory named databases"
+
+ # Skip test if given an env - this test needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "\tSubdb$tnum skipping for env $env"
+ return
+ }
+
+ # In-memory dbs never go to disk, so we can't do checksumming.
+ # If the test module sent in the -chksum arg, get rid of it.
+ set chkindex [lsearch -exact $args "-chksum"]
+ if { $chkindex != -1 } {
+ set args [lreplace $args $chkindex $chkindex]
+ }
+
+ env_cleanup $testdir
+
+ # Set up env. We'll need a big cache.
+ set csize {0 16777216 1}
+ set env [berkdb_env -create \
+ -cachesize $csize -home $testdir -mode 0644 -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set fcount 0
+
+ # Here is the loop where we put and get each key/data pair
+ set ndataent 5
+ set fdid [open $dict]
+ puts "\tSubdb$tnum.a: Open $nentries in-memory databases."
+ while { [gets $fdid str] != -1 && $fcount < $nentries } {
+ if { $str == "" } {
+ continue
+ }
+ set subdb $str
+ set db [eval {berkdb_open -create -mode 0644} \
+ -env $env $args {$omethod "" $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $ndataent } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key \
+ [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ error_check_good db_close [$db close] 0
+ incr fcount
+ }
+ close $fdid
+
+ puts "\tSubdb$tnum.b: Clean up."
+ error_check_good env_close [$env close] 0
+}
+
diff --git a/db-4.8.30/test/sdb017.tcl b/db-4.8.30/test/sdb017.tcl
new file mode 100644
index 0000000..51f8906
--- /dev/null
+++ b/db-4.8.30/test/sdb017.tcl
@@ -0,0 +1,99 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb017
+# TEST Test DB->rename() for in-memory named databases.
+proc sdb017 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ if { [is_queueext $method] == 1 } {
+ puts "Subdb017: Skipping for method $method"
+ return
+ }
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Subdb017: $method ($args): DB->rename() for in-memory named dbs."
+
+ # Skip test if given an env - this test needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb017 skipping for env $env"
+ return
+ }
+
+ # In-memory dbs never go to disk, so we can't do checksumming.
+ # If the test module sent in the -chksum arg, get rid of it.
+ set chkindex [lsearch -exact $args "-chksum"]
+ if { $chkindex != -1 } {
+ set args [lreplace $args $chkindex $chkindex]
+ }
+
+ # Make sure we're starting from a clean slate.
+ env_cleanup $testdir
+
+ # Set up env.
+ set env [berkdb_env_noerr -create -home $testdir -mode 0644]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set oldsdb OLDDB
+ set newsdb NEWDB
+
+ puts "\tSubdb017.a: Create/rename file"
+ puts "\t\tSubdb017.a.1: create"
+ set testfile ""
+ set db [eval {berkdb_open_noerr -create -mode 0644}\
+ $omethod -env $env $args {$testfile $oldsdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+	# The nature of the key and data is unimportant; use a numeric key
+	# so record-based methods don't need special treatment.
+ set key 1
+ set data [pad_data $method data]
+
+ error_check_good dbput [eval {$db put} $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tSubdb017.a.2: rename"
+ error_check_good rename_file [eval {berkdb dbrename} -env $env \
+ {$testfile $oldsdb $newsdb}] 0
+
+ puts "\t\tSubdb017.a.3: check"
+ # Open again with create to make sure we've really completely
+ # disassociated the subdb from the old name.
+ set odb [eval {berkdb_open_noerr -create -mode 0644}\
+ $omethod -env $env $args {$testfile $oldsdb}]
+ error_check_good odb_open [is_valid_db $odb] TRUE
+ set odbt [$odb get $key]
+ error_check_good odb_close [$odb close] 0
+
+ set ndb [eval {berkdb_open_noerr -mode 0644}\
+ $omethod -env $env $args {$testfile $newsdb}]
+ error_check_good ndb_open [is_valid_db $ndb] TRUE
+ set ndbt [$ndb get $key]
+ error_check_good ndb_close [$ndb close] 0
+
+	# The DBT from the "old" database should be empty; the DBT from the
+	# "new" database should not.
+ error_check_good odbt_empty [llength $odbt] 0
+ error_check_bad ndbt_empty [llength $ndbt] 0
+ error_check_good ndbt [lindex [lindex $ndbt 0] 1] $data
+
+ # Now there's both an old and a new. Rename the "new" to the "old"
+ # and make sure that fails.
+ puts "\tSubdb017.b: Make sure rename fails instead of overwriting"
+ set errorCode NONE
+ set ret [catch {eval {berkdb dbrename} -env $env \
+ {$testfile $oldsdb $newsdb}} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
+
+ error_check_good env_close [$env close] 0
+}
+
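For reference, a condensed sketch of the rename flow sdb017 tests; the empty string stands in for the in-memory file name and the subdb names are placeholders:

set env [berkdb_env -create -home $testdir]
set db [berkdb_open -create -env $env -btree "" OLDNAME]
$db close
# Rename the named in-memory database.
error_check_good rename [berkdb dbrename -env $env "" OLDNAME NEWNAME] 0
# Re-create OLDNAME; renaming it onto the existing NEWNAME should now
# fail with EEXIST instead of overwriting.
set db [berkdb_open -create -env $env -btree "" OLDNAME]
$db close
set rc [catch {berkdb dbrename -env $env "" OLDNAME NEWNAME} err]
error_check_bad rename_overwrite $rc 0
$env close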
diff --git a/db-4.8.30/test/sdb018.tcl b/db-4.8.30/test/sdb018.tcl
new file mode 100644
index 0000000..6ffb2e6
--- /dev/null
+++ b/db-4.8.30/test/sdb018.tcl
@@ -0,0 +1,156 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb018
+# TEST Tests join of in-memory named databases.
+# TEST
+# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
+# TEST everything else does as well. We'll create test databases called
+# TEST sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
+# TEST describes the duplication -- duplicates are of the form 0, N, 2N, 3N,
+# TEST ... where N is the number of the database. Primary.db is the primary
+# TEST database, and sub0.db is the database that has no matching duplicates.
+# TEST All of these are within a single database.
+#
+# We should test this on all btrees, all hash, and a combination thereof
+proc sdb018 {method {nentries 100} args } {
+ source ./include.tcl
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] } {
+ puts "\tSubdb018 skipping for method $method."
+ return
+ }
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are given an env, then skip this test. It needs its own.
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+		puts "Subdb018 skipping for env $env"
+ return
+ }
+
+ # In-memory dbs never go to disk, so we can't do checksumming.
+ # If the test module sent in the -chksum arg, get rid of it.
+ set chkindex [lsearch -exact $args "-chksum"]
+ if { $chkindex != -1 } {
+ set args [lreplace $args $chkindex $chkindex]
+ }
+
+ set testfile ""
+ berkdb srand $rand_init
+
+ foreach opt {" -dup" " -dupsort"} {
+ env_cleanup $testdir
+
+ set cache [expr 1024 * 1024 * 10]
+ set env [berkdb_env -create -home $testdir \
+ -cachesize "0 $cache 1" ]
+ append args $opt
+ set oargs $args
+ append oargs " -env $env"
+
+
+ puts "Subdb018: $method ( $args ) Intra-subdb join"
+ set txn ""
+		#
+		# Build a set of subdbs with known duplicate patterns, then
+		# run intra-subdb joins against the primary.
+		#
+ puts "\tSubdb018.a: Intra-subdb join"
+
+ set psize 8192
+ set duplist {0 50 25 16 12}
+ set numdb [llength $duplist]
+ build_all_subdb $testfile [list $method] $psize \
+ $duplist $nentries $oargs
+
+ # Build the primary
+ puts "Subdb018: Building the primary database $method"
+ set oflags "-create -mode 0644 [conv $omethod \
+ [berkdb random_int 1 2]]"
+ set db [eval {berkdb_open} $oflags $oargs {$testfile} primary.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for { set i 0 } { $i < 1000 } { incr i } {
+ set key [format "%04d" $i]
+ set ret [eval {$db put} $txn {$key stub}]
+ error_check_good "primary put" $ret 0
+ }
+ error_check_good "primary close" [$db close] 0
+ set did [open $dict]
+ gets $did str
+ do_join_subdb $testfile primary.db "1 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2 3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 4 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "0 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 0 1" $str $oargs
+
+ close $did
+ error_check_good env_close [$env close] 0
+ }
+}
+
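A small sketch of the Tcl join call that do_join_subdb (defined in sdbutils.tcl) issues under the hood; the file, key, and handle names here are hypothetical:

set p [berkdb_open -unknown $testfile primary.db]
set s2 [berkdb_open -unknown $testfile sub2.db]
set s3 [berkdb_open -unknown $testfile sub3.db]
# get_join takes {db key} pairs and returns the key/data pairs whose
# duplicate sets intersect across all of the listed secondaries.
set matches [$p get_join [list $s2 $key] [list $s3 $key]]
$s3 close
$s2 close
$p close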
diff --git a/db-4.8.30/test/sdb019.tcl b/db-4.8.30/test/sdb019.tcl
new file mode 100644
index 0000000..760366d
--- /dev/null
+++ b/db-4.8.30/test/sdb019.tcl
@@ -0,0 +1,139 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb019
+# TEST Tests in-memory subdatabases.
+# TEST Create an in-memory subdb. Test for persistence after
+# TEST overflowing the cache. Test for conflicts when we have
+# TEST two in-memory files.
+
+proc sdb019 { method { nentries 100 } args } {
+ source ./include.tcl
+
+ set tnum "019"
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queueext $method] == 1 } {
+ puts "Subdb$tnum: skipping for method $method"
+ return
+ }
+ puts "Subdb$tnum: $method ($args) in-memory subdb tests"
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ set env NULL
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb019 skipping for env $env"
+ return
+ }
+
+ # In-memory dbs never go to disk, so we can't do checksumming.
+ # If the test module sent in the -chksum arg, get rid of it.
+ set chkindex [lsearch -exact $args "-chksum"]
+ if { $chkindex != -1 } {
+ set args [lreplace $args $chkindex $chkindex]
+ }
+
+ # The standard cachesize isn't big enough for 64k pages.
+ set csize "0 262144 1"
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ incr pgindex
+ set pagesize [lindex $args $pgindex]
+ if { $pagesize > 16384 } {
+ set cache [expr 8 * $pagesize]
+ set csize "0 $cache 1"
+ }
+ }
+
+ # Create the env.
+ env_cleanup $testdir
+ set env [eval berkdb_env -create {-cachesize $csize} \
+ -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # Set filename to NULL; this allows us to create an in-memory
+ # named database.
+ set testfile ""
+
+ # Create two in-memory subdb and test for conflicts. Try all the
+ # combinations of named (NULL/NAME) and purely temporary
+ # (NULL/NULL) databases.
+ #
+ foreach s1 { S1 "" } {
+ foreach s2 { S2 "" } {
+ puts "\tSubdb$tnum.a:\
+ 2 in-memory subdbs (NULL/$s1, NULL/$s2)."
+ set sdb1 [eval {berkdb_open -create -mode 0644} \
+ $args -env $env {$omethod $testfile $s1}]
+ error_check_good sdb1_open [is_valid_db $sdb1] TRUE
+ set sdb2 [eval {berkdb_open -create -mode 0644} \
+ $args -env $env {$omethod $testfile $s2}]
+			error_check_good sdb2_open [is_valid_db $sdb2] TRUE
+
+ # Subdatabases are open, now put something in.
+ set string1 STRING1
+ set string2 STRING2
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ set key $i
+ error_check_good sdb1_put [$sdb1 put $key \
+ [chop_data $method $string1.$key]] 0
+ error_check_good sdb2_put [$sdb2 put $key \
+ [chop_data $method $string2.$key]] 0
+ }
+
+ # If the subs are both NULL/NULL, we have two handles
+ # on the same db. Skip testing the contents.
+ if { $s1 != "" || $s2 != "" } {
+ # This can't work when both subs are NULL/NULL.
+ # Check contents.
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ set key $i
+ set ret1 [lindex \
+ [lindex [$sdb1 get $key] 0] 1]
+ error_check_good sdb1_get $ret1 \
+ [pad_data $method $string1.$key]
+ set ret2 [lindex \
+ [lindex [$sdb2 get $key] 0] 1]
+ error_check_good sdb2_get $ret2 \
+ [pad_data $method $string2.$key]
+ }
+
+ error_check_good sdb1_close [$sdb1 close] 0
+ error_check_good sdb2_close [$sdb2 close] 0
+
+ # Reopen, make sure we get the right data.
+ set sdb1 [eval {berkdb_open -mode 0644} \
+ $args -env $env {$omethod $testfile $s1}]
+ error_check_good \
+ sdb1_open [is_valid_db $sdb1] TRUE
+ set sdb2 [eval {berkdb_open -mode 0644} \
+ $args -env $env {$omethod $testfile $s2}]
+ error_check_good \
+				    sdb2_open [is_valid_db $sdb2] TRUE
+
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ set key $i
+ set ret1 [lindex \
+ [lindex [$sdb1 get $key] 0] 1]
+ error_check_good sdb1_get $ret1 \
+ [pad_data $method $string1.$key]
+ set ret2 [lindex \
+ [lindex [$sdb2 get $key] 0] 1]
+ error_check_good sdb2_get $ret2 \
+ [pad_data $method $string2.$key]
+ }
+ }
+ error_check_good sdb1_close [$sdb1 close] 0
+ error_check_good sdb2_close [$sdb2 close] 0
+ }
+ }
+ error_check_good env_close [$env close] 0
+}
+
diff --git a/db-4.8.30/test/sdb020.tcl b/db-4.8.30/test/sdb020.tcl
new file mode 100644
index 0000000..3eb8840
--- /dev/null
+++ b/db-4.8.30/test/sdb020.tcl
@@ -0,0 +1,124 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdb020
+# TEST Tests in-memory subdatabases.
+# TEST Create an in-memory subdb with one page size. Close, and
+# TEST open with a different page size: should fail.
+
+proc sdb020 { method { nentries 10 } args } {
+ source ./include.tcl
+ global errorCode
+
+ set tnum "020"
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queueext $method] == 1 } {
+ puts "Subdb$tnum: skipping for method $method"
+ return
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Subdb$tnum: skipping for specific page sizes."
+ return
+ }
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ set env NULL
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb020 skipping for env $env"
+ return
+ }
+
+ # In-memory dbs never go to disk, so we can't do checksumming.
+ # If the test module sent in the -chksum arg, get rid of it.
+ set chkindex [lsearch -exact $args "-chksum"]
+ if { $chkindex != -1 } {
+ set args [lreplace $args $chkindex $chkindex]
+ }
+
+ puts "Subdb$tnum: $method ($args) \
+ in-memory named db tests with different pagesizes"
+
+ # Create the env.
+ env_cleanup $testdir
+ set env [berkdb_env_noerr -create -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # Set filename to NULL; this causes the creation of an in-memory
+ # subdb.
+ set testfile ""
+ set name NAME
+
+ puts "\tSubdb$tnum.a: Create in-mem named db with default page size."
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
+ $args -env $env {$omethod $testfile $name}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Figure out the default page size so we can try to open
+ # later with a different value.
+ set psize [stat_field $db stat "Page size"]
+ if { $psize == 512 } {
+ set psize2 2048
+ } else {
+ set psize2 512
+ }
+
+ error_check_good db_close [$db close] 0
+
+ # Try to open again with a different page size (should fail).
+ puts "\tSubdb$tnum.b: Try to reopen with different page size."
+ set errorCode NONE
+ catch {set db [eval {berkdb_open_noerr} $args -env $env \
+ -pagesize $psize2 {$omethod $testfile $name}]} res
+ error_check_good expect_error [is_substr $errorCode EINVAL] 1
+
+ # Try to open again with the correct pagesize (should succeed).
+ puts "\tSubdb$tnum.c: Reopen with original page size."
+ set db [eval {berkdb_open_noerr} $args -env $env \
+ -pagesize $psize {$omethod $testfile $name}]
+ # Close DB
+ error_check_good db_close [$db close] 0
+
+ puts "\tSubdb$tnum.d: Create in-mem named db with specific page size."
+ set psize 8192
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
+ $args -env $env -pagesize $psize {$omethod $testfile $name}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ # Try to open again with a different page size (should fail).
+ set psize2 [expr $psize / 2]
+ puts "\tSubdb$tnum.e: Try to reopen with different page size."
+ set errorCode NONE
+ catch {set db [eval {berkdb_open_noerr} $args -env $env \
+ -pagesize $psize2 {$omethod $testfile $name}]} res
+ error_check_good expect_error [is_substr $errorCode EINVAL] 1
+
+ # Try to open again with the correct pagesize (should succeed).
+ puts "\tSubdb$tnum.f: Reopen with original page size."
+ set db [eval {berkdb_open} $args -env $env \
+ -pagesize $psize {$omethod $testfile $name}]
+
+ # Try to open a different database with a different page size
+ # (should succeed).
+ puts "\tSubdb$tnum.g: Open different db with different page size."
+ set newname NEWNAME
+ set db2 [eval {berkdb_open} -create $args -env $env \
+ -pagesize $psize2 {$omethod $testfile $newname}]
+
+ # Clean up.
+ error_check_good db_close [$db close] 0
+ error_check_good db2_close [$db2 close] 0
+ error_check_good env_close [$env close] 0
+}
+
+
diff --git a/db-4.8.30/test/sdbscript.tcl b/db-4.8.30/test/sdbscript.tcl
new file mode 100644
index 0000000..2e2f34b
--- /dev/null
+++ b/db-4.8.30/test/sdbscript.tcl
@@ -0,0 +1,46 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Usage: subdbscript testfile subdbnumber factor
+# testfile: name of DB itself
+# subdbnumber: n, subdb indicator, of form sub$n.db
+# factor: delete every factor'th entry, offset by n, from this subdb.
+#
+# I.e. if factor is 10 and n is 0, remove entries 0, 10, 20, ...;
+# if factor is 10 and n is 1, remove entries 1, 11, 21, ...
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "subdbscript testfile subdbnumber factor"
+
+# Verify usage
+if { $argc != 3 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set testfile [lindex $argv 0]
+set n [ lindex $argv 1 ]
+set factor [ lindex $argv 2 ]
+
+set db [berkdb_open -unknown $testfile sub$n.db]
+error_check_good db_open [is_valid_db $db] TRUE
+
+set dbc [$db cursor]
+error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+set i 1
+for {set d [$dbc get -first]} {[llength $d] != 0} {set d [$dbc get -next]} {
+ set x [expr $i - $n]
+ if { $x >= 0 && [expr $x % $factor] == 0 } {
+ puts "Deleting $d"
+ error_check_good dbc_del [$dbc del] 0
+ }
+ incr i
+}
+error_check_good db_close [$db close] 0
+
+exit
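A quick illustration of the deletion rule described in the header comment (a standalone sketch, not part of the script):

set factor 10
set n 1
for {set i 1} {$i <= 30} {incr i} {
	set x [expr $i - $n]
	if { $x >= 0 && [expr $x % $factor] == 0 } {
		# With factor 10 and n 1 this reports records 1, 11, 21.
		puts "would delete record $i"
	}
}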
diff --git a/db-4.8.30/test/sdbtest001.tcl b/db-4.8.30/test/sdbtest001.tcl
new file mode 100644
index 0000000..a7fd39c
--- /dev/null
+++ b/db-4.8.30/test/sdbtest001.tcl
@@ -0,0 +1,149 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdbtest001
+# TEST Tests multiple access methods in one subdb
+# TEST Open several subdbs, each with a different access method
+# TEST Small keys, small data
+# TEST Put/get per key per subdb
+# TEST Dump file, verify per subdb
+# TEST Close, reopen per subdb
+# TEST Dump file, verify per subdb
+# TEST
+# TEST Make several subdb's of different access methods all in one DB.
+# TEST Rotate methods and repeat [#762].
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc sdbtest001 { {nentries 10000} } {
+ source ./include.tcl
+
+ puts "Subdbtest001: many different subdb access methods in one"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdbtest001.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ set txn ""
+ set count 0
+
+ # Set up various methods to rotate through
+ lappend method_list [list "-rrecno" "-rbtree" "-hash" "-recno" "-btree"]
+ lappend method_list [list "-recno" "-hash" "-btree" "-rbtree" "-rrecno"]
+ lappend method_list [list "-btree" "-recno" "-rbtree" "-rrecno" "-hash"]
+ lappend method_list [list "-hash" "-recno" "-rbtree" "-rrecno" "-btree"]
+ lappend method_list [list "-rbtree" "-hash" "-btree" "-rrecno" "-recno"]
+ lappend method_list [list "-rrecno" "-recno"]
+ lappend method_list [list "-recno" "-rrecno"]
+ lappend method_list [list "-hash" "-dhash"]
+ lappend method_list [list "-dhash" "-hash"]
+ lappend method_list [list "-rbtree" "-btree" "-dbtree" "-ddbtree"]
+ lappend method_list [list "-btree" "-rbtree" "-ddbtree" "-dbtree"]
+ lappend method_list [list "-dbtree" "-ddbtree" "-btree" "-rbtree"]
+ lappend method_list [list "-ddbtree" "-dbtree" "-rbtree" "-btree"]
+ set plist [list 512 8192 1024 4096 2048 16384]
+ set mlen [llength $method_list]
+ set plen [llength $plist]
+ while { $plen < $mlen } {
+ set plist [concat $plist $plist]
+ set plen [llength $plist]
+ }
+ set pgsz 0
+ foreach methods $method_list {
+ cleanup $testdir NULL
+ puts "\tSubdbtest001.a: create subdbs of different access methods:"
+ puts "\tSubdbtest001.a: $methods"
+ set nsubdbs [llength $methods]
+ set duplist ""
+ for { set i 0 } { $i < $nsubdbs } { incr i } {
+ lappend duplist -1
+ }
+ set psize [lindex $plist $pgsz]
+ incr pgsz
+ set newent [expr $nentries / $nsubdbs]
+ build_all_subdb $testfile $methods $psize $duplist $newent
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+
+ set method [lindex $methods $subdb]
+ set method [convert_method $method]
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdbtest001_recno.check
+ } else {
+ set checkfunc subdbtest001.check
+ }
+
+ puts "\tSubdbtest001.b: dump file sub$subdb.db"
+ set db [berkdb_open -unknown $testfile sub$subdb.db]
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the
+ # dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+ puts $oid [expr $subdb * $newent + $i]
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ # filehead uses 1-based line numbers
+ set beg [expr $subdb * $newent]
+ incr beg
+ set end [expr $beg + $newent - 1]
+ filehead $end $dict $t3 $beg
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdbtest001.c: sub$subdb.db: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-first" "-next" sub$subdb.db
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdbtest001.d: sub$subdb.db: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" sub$subdb.db
+
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+ }
+}
+
+# Check function for Subdbtest001; keys and data are identical
+proc subdbtest001.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdbtest001_recno.check { key data } {
+	global dict
+	global kvals
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/db-4.8.30/test/sdbtest002.tcl b/db-4.8.30/test/sdbtest002.tcl
new file mode 100644
index 0000000..4c8406d
--- /dev/null
+++ b/db-4.8.30/test/sdbtest002.tcl
@@ -0,0 +1,167 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sdbtest002
+# TEST Tests multiple access methods in one subdb access by multiple
+# TEST processes.
+# TEST Open several subdbs, each with a different access method
+# TEST Small keys, small data
+# TEST Put/get per key per subdb
+# TEST Fork off several child procs to each delete selected
+# TEST data from their subdb and then exit
+# TEST Dump file, verify contents of each subdb is correct
+# TEST Close, reopen per subdb
+# TEST Dump file, verify per subdb
+# TEST
+# TEST Make several subdb's of different access methods all in one DB.
+# TEST Fork off some child procs to each manipulate one subdb and when
+# TEST they are finished, verify the contents of the databases.
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc sdbtest002 { {nentries 10000} } {
+ source ./include.tcl
+
+ puts "Subdbtest002: many different subdb access methods in one"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdbtest002.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ set txn ""
+ set count 0
+
+ # Set up various methods to rotate through
+ set methods \
+ [list "-rbtree" "-recno" "-btree" "-btree" "-recno" "-rbtree"]
+ cleanup $testdir NULL
+ puts "\tSubdbtest002.a: create subdbs of different methods: $methods"
+ set psize 4096
+ set nsubdbs [llength $methods]
+ set duplist ""
+ for { set i 0 } { $i < $nsubdbs } { incr i } {
+ lappend duplist -1
+ }
+ set newent [expr $nentries / $nsubdbs]
+
+ #
+ # XXX We need dict sorted to figure out what was deleted
+ # since things are stored sorted in the btree.
+ #
+ filesort $dict $t4
+ set dictorig $dict
+ set dict $t4
+
+ build_all_subdb $testfile $methods $psize $duplist $newent
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ set pidlist ""
+ puts "\tSubdbtest002.b: create $nsubdbs procs to delete some keys"
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+ puts "$tclsh_path\
+ $test_path/sdbscript.tcl $testfile \
+ $subdb $nsubdbs >& $testdir/subdb002.log.$subdb"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ sdbscript.tcl \
+ $testdir/subdb002.log.$subdb $testfile $subdb $nsubdbs &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+ set method [lindex $methods $subdb]
+ set method [convert_method $method]
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdbtest002_recno.check
+ } else {
+ set checkfunc subdbtest002.check
+ }
+
+ puts "\tSubdbtest002.b: dump file sub$subdb.db"
+ set db [berkdb_open -unknown $testfile sub$subdb.db]
+ error_check_good db_open [is_valid_db $db] TRUE
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+ set x [expr $i - $subdb]
+ if { [expr $x % $nsubdbs] != 0 } {
+ puts $oid [expr $subdb * $newent + $i]
+ }
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set oid [open $t4 r]
+ for {set i 1} {[gets $oid line] >= 0} {incr i} {
+ set farr($i) $line
+ }
+ close $oid
+
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+				# Line numbers in the sorted dict file are 1-based
+ set x [expr $i - $subdb]
+ if { [expr $x % $nsubdbs] != 0 } {
+ set beg [expr $subdb * $newent]
+ set beg [expr $beg + $i]
+ puts $oid $farr($beg)
+ }
+ }
+ close $oid
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdbtest002.c: sub$subdb.db: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-first" "-next" sub$subdb.db
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdbtest002.d: sub$subdb.db: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" sub$subdb.db
+
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+ set dict $dictorig
+ return
+}
+
+# Check function for Subdbtest002; keys and data are identical
+proc subdbtest002.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdbtest002_recno.check { key data } {
+	global dict
+	global kvals
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/db-4.8.30/test/sdbutils.tcl b/db-4.8.30/test/sdbutils.tcl
new file mode 100644
index 0000000..e50177f
--- /dev/null
+++ b/db-4.8.30/test/sdbutils.tcl
@@ -0,0 +1,196 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+proc build_all_subdb { dbname methods psize dups {nentries 100} {dbargs ""}} {
+ set nsubdbs [llength $dups]
+ set mlen [llength $methods]
+ set savearg $dbargs
+ for {set i 0} {$i < $nsubdbs} { incr i } {
+ set m [lindex $methods [expr $i % $mlen]]
+ set dbargs $savearg
+ subdb_build $dbname $nentries [lindex $dups $i] \
+ $i $m $psize sub$i.db $dbargs
+ }
+}
+
+proc subdb_build { name nkeys ndups dup_interval method psize subdb dbargs} {
+ source ./include.tcl
+
+ set dbargs [convert_args $method $dbargs]
+ set omethod [convert_method $method]
+
+ puts "Method: $method"
+
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ }
+ # Create the database and open the dictionary
+ set oflags "-create -mode 0644 $omethod \
+ -pagesize $psize $dbargs {$name} $subdb"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ if { $ndups >= 0 } {
+ puts "\tBuilding $method {$name} $subdb. \
+ $nkeys keys with $ndups duplicates at interval of $dup_interval"
+ }
+ if { $ndups < 0 } {
+ puts "\tBuilding $method {$name} $subdb. \
+ $nkeys unique keys of pagesize $psize"
+ #
+ # If ndups is < 0, we want unique keys in each subdb,
+ # so skip ahead in the dict by nkeys * iteration
+ #
+ for { set count 0 } \
+ { $count < [expr $nkeys * $dup_interval] } {
+ incr count} {
+ set ret [gets $did str]
+ if { $ret == -1 } {
+ break
+ }
+ }
+ }
+ set txn ""
+ for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } {
+ incr count} {
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set data [format "%04d" [expr $i * $dup_interval]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$str \
+ [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { $ndups == 0 } {
+ set ret [eval {$db put} $txn {$str \
+ [chop_data $method NODUP]}]
+ error_check_good put $ret 0
+ } elseif { $ndups < 0 } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set num [expr $nkeys * $dup_interval]
+ set num [expr $num + $count + 1]
+ set ret [eval {$db put} $txn {$num \
+ [chop_data $method $str]}]
+ set kvals($num) [pad_data $method $str]
+ error_check_good put $ret 0
+ } else {
+ set ret [eval {$db put} $txn \
+ {$str [chop_data $method $str]}]
+ error_check_good put $ret 0
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ close $did
+ error_check_good close:$name [$db close] 0
+}
+
+proc do_join_subdb { db primary subdbs key oargs } {
+ source ./include.tcl
+
+ puts "\tJoining: $subdbs on $key"
+
+ # Open all the databases
+ set p [eval {berkdb_open -unknown} $oargs { $db } $primary]
+ error_check_good "primary open" [is_valid_db $p] TRUE
+
+ set dblist ""
+ set curslist ""
+
+ foreach i $subdbs {
+ set jdb [eval {berkdb_open -unknown} $oargs { $db } sub$i.db]
+ error_check_good "sub$i.db open" [is_valid_db $jdb] TRUE
+
+ lappend jlist [list $jdb $key]
+ lappend dblist $jdb
+
+ }
+
+ set join_res [eval {$p get_join} $jlist]
+ set ndups [llength $join_res]
+
+ # Calculate how many dups we expect.
+	# We walk the list of subdb numbers.  A 0 means the no-match subdb,
+	# so we expect 0 dups.  Otherwise we fold the numbers into a single
+	# interval: if one number divides the other, the larger one is the
+	# interval; otherwise the interval is their least common multiple.
+	# The expected count is 50 divided by that interval.
+ set expected 50
+ set last 1
+ foreach n $subdbs {
+ if { $n == 0 } {
+ set expected 0
+ break
+ }
+ if { $last == $n } {
+ continue
+ }
+
+ if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } {
+ if { $n > $last } {
+ set last $n
+ set expected [expr 50 / $last]
+ }
+ } else {
+ set last [expr $n * $last / [gcd $n $last]]
+ set expected [expr 50 / $last]
+ }
+ }
+
+ error_check_good number_of_dups:$subdbs $ndups $expected
+
+ #
+ # If we get here, we have the number expected, now loop
+ # through each and see if it is what we expected.
+ #
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set pair [lindex $join_res $i]
+ set k [lindex $pair 0]
+ foreach j $subdbs {
+ error_check_bad valid_dup:$j:$subdbs $j 0
+ set kval [string trimleft $k 0]
+ if { [string length $kval] == 0 } {
+ set kval 0
+ }
+ error_check_good \
+ valid_dup:$j:$subdbs [expr $kval % $j] 0
+ }
+ }
+
+ error_check_good close_primary [$p close] 0
+ foreach i $dblist {
+ error_check_good close_index:$i [$i close] 0
+ }
+}
+
+proc n_to_subname { n } {
+ if { $n == 0 } {
+ return null.db;
+ } else {
+ return sub$n.db;
+ }
+}
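The expected-count rule in do_join_subdb can be restated as folding the subdb numbers into a single duplicate interval; a hedged rewrite (reusing the suite's gcd helper, and equivalent to the in-line loop above) might look like this:

proc expected_dups { subdbs } {
	# 0 means the subdb with no matching duplicates: expect nothing.
	set last 1
	set expected 50
	foreach n $subdbs {
		if { $n == 0 } { return 0 }
		if { $n == $last } { continue }
		if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } {
			if { $n > $last } { set last $n }
		} else {
			# Non-divisible pair: the interval is the lcm.
			set last [expr $n * $last / [gcd $n $last]]
		}
		set expected [expr 50 / $last]
	}
	return $expected
}
# e.g. expected_dups {2 3} == 8, since lcm(2,3) = 6 and 50 / 6 = 8.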
diff --git a/db-4.8.30/test/sec001.tcl b/db-4.8.30/test/sec001.tcl
new file mode 100644
index 0000000..c80a971
--- /dev/null
+++ b/db-4.8.30/test/sec001.tcl
@@ -0,0 +1,222 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sec001
+# TEST Test of security interface
+proc sec001 { } {
+ global errorInfo
+ global errorCode
+ global has_crypto
+ global is_hp_test
+
+ source ./include.tcl
+ # Skip test if release does not support encryption.
+ if { $has_crypto == 0 } {
+ puts "Skipping test sec001 for non-crypto release."
+ return
+ }
+
+ set testfile1 env1.db
+ set testfile2 $testdir/env2.db
+ set subdb1 sub1
+ set subdb2 sub2
+
+ puts "Sec001: Test of basic encryption interface."
+ env_cleanup $testdir
+
+ set passwd1 "passwd1"
+ set passwd1_bad "passwd1_bad"
+ set passwd2 "passwd2"
+ set key "key"
+ set data "data"
+
+ #
+ # This first group tests bad create scenarios and also
+ # tests attempting to use encryption after creating a
+ # non-encrypted env/db to begin with.
+ #
+ set nopass ""
+ puts "\tSec001.a.1: Create db with encryption."
+ set db [berkdb_open -create -encryptaes $passwd1 -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.a.2: Open db without encryption."
+ set stat [catch {berkdb_open_noerr $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "no encryption key"] 1
+
+ set ret [berkdb dbremove -encryptaes $passwd1 $testfile2]
+
+ puts "\tSec001.b.1: Create db without encryption or checksum."
+ set db [berkdb_open -create -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.b.2: Open db with encryption."
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "supplied encryption key"] 1
+
+ set ret [berkdb dbremove $testfile2]
+
+ puts "\tSec001.c.1: Create db with checksum."
+ set db [berkdb_open -create -chksum -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.c.2: Open db with encryption."
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "supplied encryption key"] 1
+
+ set ret [berkdb dbremove $testfile2]
+
+ puts "\tSec001.d.1: Create subdb with encryption."
+ set db [berkdb_open -create -encryptaes $passwd1 -btree \
+ $testfile2 $subdb1]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.d.2: Create 2nd subdb without encryption."
+ set stat [catch {berkdb_open_noerr -create -btree \
+ $testfile2 $subdb2} ret]
+ error_check_good subdb:nocrypto $stat 1
+ error_check_good subdb:fail [is_substr $ret "no encryption key"] 1
+
+ set ret [berkdb dbremove -encryptaes $passwd1 $testfile2]
+
+ puts "\tSec001.e.1: Create subdb without encryption or checksum."
+ set db [berkdb_open -create -btree $testfile2 $subdb1]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.e.2: Create 2nd subdb with encryption."
+ set stat [catch {berkdb_open_noerr -create -btree -encryptaes $passwd1 \
+ $testfile2 $subdb2} ret]
+ error_check_good subdb:nocrypto $stat 1
+ error_check_good subdb:fail [is_substr $ret "supplied encryption key"] 1
+
+ env_cleanup $testdir
+
+ puts "\tSec001.f.1: Open env with encryption, empty passwd."
+ set stat [catch {berkdb_env_noerr -create -home $testdir \
+ -encryptaes $nopass} ret]
+ error_check_good env:nopass $stat 1
+ error_check_good env:fail [is_substr $ret "Empty password"] 1
+
+ puts "\tSec001.f.2: Create without encryption algorithm (DB_ENCRYPT_ANY)."
+ set stat [catch {berkdb_env_noerr -create -home $testdir \
+ -encryptany $passwd1} ret]
+ error_check_good env:any $stat 1
+ error_check_good env:fail [is_substr $ret "algorithm not supplied"] 1
+
+ puts "\tSec001.f.3: Create without encryption."
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env [is_valid_env $env] TRUE
+
+ # Skip this piece of the test on HP-UX, where we can't
+ # join the env.
+ if { $is_hp_test != 1 } {
+ puts "\tSec001.f.4: Open again with encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd1} ret]
+ error_check_good env:unencrypted $stat 1
+ error_check_good env:fail [is_substr $ret \
+ "Joining non-encrypted environment"] 1
+ }
+
+ error_check_good envclose [$env close] 0
+
+ env_cleanup $testdir
+
+ #
+ # This second group tests creating and opening a secure env.
+	# We test that others can join successfully, and that others with
+	# bad or missing passwords cannot.  Also test that we cannot use the
+ # db->set_encrypt method when we've already got a secure dbenv.
+ #
+ puts "\tSec001.g.1: Open with encryption."
+ set env [berkdb_env_noerr -create -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env] TRUE
+
+ # We can't open an env twice in HP-UX, so skip the rest.
+ if { $is_hp_test == 1 } {
+ puts "Skipping remainder of test for HP-UX."
+ error_check_good env_close [$env close] 0
+ return
+ }
+
+ puts "\tSec001.g.2: Open again with encryption - same passwd."
+ set env1 [berkdb_env -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+ error_check_good envclose [$env1 close] 0
+
+ puts "\tSec001.g.3: Open again with any encryption (DB_ENCRYPT_ANY)."
+ set env1 [berkdb_env -home $testdir -encryptany $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+ error_check_good envclose [$env1 close] 0
+
+ puts "\tSec001.g.4: Open with encryption - different length passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd1_bad} ret]
+ error_check_good env:$passwd1_bad $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.g.5: Open with encryption - different passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd2} ret]
+ error_check_good env:$passwd2 $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.g.6: Open env without encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir} ret]
+ error_check_good env:$passwd2 $stat 1
+ error_check_good env:fail [is_substr $ret "Encrypted environment"] 1
+
+ puts "\tSec001.g.7: Open database with encryption in env"
+ set stat [catch {berkdb_open_noerr -env $env -btree -create \
+ -encryptaes $passwd2 $testfile1} ret]
+ error_check_good db:$passwd2 $stat 1
+ error_check_good env:fail [is_substr $ret "method not permitted"] 1
+
+ puts "\tSec001.g.8: Close creating env"
+ error_check_good envclose [$env close] 0
+
+ #
+	# This third group tests opening the env after the original env
+	# handle is closed, to make sure we can reopen it properly even
+	# when no handles are currently open.
+ #
+ puts "\tSec001.h.1: Reopen without encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir} ret]
+ error_check_good env:noencrypt $stat 1
+ error_check_good env:fail [is_substr $ret "Encrypted environment"] 1
+
+ puts "\tSec001.h.2: Reopen with bad passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir -encryptaes \
+ $passwd1_bad} ret]
+ error_check_good env:$passwd1_bad $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.h.3: Reopen with encryption."
+ set env [berkdb_env -create -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tSec001.h.4: 2nd Reopen with encryption."
+ set env1 [berkdb_env -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+
+ error_check_good envclose [$env1 close] 0
+ error_check_good envclose [$env close] 0
+
+ puts "\tSec001 complete."
+}
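A minimal sketch of the env-joining behavior exercised in group g; the password and home directory below are placeholders:

set env [berkdb_env -create -home $testdir -encryptaes "passwd1"]
# A second handle must supply the same password, either explicitly or
# via -encryptany; a wrong or missing password is rejected.
set env2 [berkdb_env -home $testdir -encryptany "passwd1"]
error_check_good join_env [is_valid_env $env2] TRUE
$env2 close
$env close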
diff --git a/db-4.8.30/test/sec002.tcl b/db-4.8.30/test/sec002.tcl
new file mode 100644
index 0000000..b5c8e35
--- /dev/null
+++ b/db-4.8.30/test/sec002.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sec002
+# TEST Test of security interface and catching errors in the
+# TEST face of attackers overwriting parts of existing files.
+proc sec002 { } {
+ global errorInfo
+ global errorCode
+ global has_crypto
+
+ source ./include.tcl
+
+ # Skip test if release does not support encryption.
+ if { $has_crypto == 0 } {
+ puts "Skipping test sec002 for non-crypto release."
+ return
+ }
+
+ set testfile1 $testdir/sec002-1.db
+ set testfile2 $testdir/sec002-2.db
+ set testfile3 $testdir/sec002-3.db
+ set testfile4 $testdir/sec002-4.db
+
+ puts "Sec002: Test of basic encryption interface."
+ env_cleanup $testdir
+
+ set passwd1 "passwd1"
+ set passwd2 "passwd2"
+ set key "key"
+ set data "data"
+ set pagesize 1024
+
+ #
+	# Set up 4 databases: two encrypted with different passwords, one
+	# unencrypted with checksumming turned on, and one unencrypted with
+	# no checksumming.  Place the exact same data in each one.
+ #
+ puts "\tSec002.a: Setup databases"
+ set db_cmd "-create -pagesize $pagesize -btree "
+ set db [eval {berkdb_open} -encryptaes $passwd1 $db_cmd $testfile1]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} -encryptaes $passwd2 $db_cmd $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} -chksum $db_cmd $testfile3]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} $db_cmd $testfile4]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ #
+ # If we reopen the normal file with the -chksum flag, there
+ # should be no error and checksumming should be ignored.
+ # If we reopen a checksummed file without the -chksum flag,
+ # checksumming should still be in effect. [#6959]
+ #
+ puts "\tSec002.b: Inheritance of chksum properties"
+ puts "\t\tSec002.b1: Reopen ordinary file with -chksum flag"
+ set db [eval {berkdb_open} -chksum $testfile4]
+ error_check_good open_with_chksum [is_valid_db $db] TRUE
+ set retdata [$db get $key]
+ error_check_good testfile4_get [lindex [lindex $retdata 0] 1] $data
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tSec002.b2: Reopen checksummed file without -chksum flag"
+ set db [eval {berkdb_open} $testfile3]
+ error_check_good open_wo_chksum [is_valid_db $db] TRUE
+ set retdata [$db get $key]
+ error_check_good testfile3_get [lindex [lindex $retdata 0] 1] $data
+ error_check_good dbclose [$db close] 0
+
+ #
+	# First just touch some bits in the file.  We know that in btree
+	# meta pages, bytes 92-459 are unused.  Scribble on them in the
+	# encrypted file and in both unencrypted files.  We should get a
+	# checksum error for the encrypted and checksummed files, and no
+	# error for the normal file.
+ #
+ set fidlist {}
+ set fid [open $testfile1 r+]
+ lappend fidlist $fid
+ set fid [open $testfile3 r+]
+ lappend fidlist $fid
+ set fid [open $testfile4 r+]
+ lappend fidlist $fid
+
+ puts "\tSec002.c: Overwrite unused space in meta-page"
+ foreach f $fidlist {
+ fconfigure $f -translation binary
+ seek $f 100 start
+ set byte [read $f 1]
+ binary scan $byte c val
+ set newval [expr ~$val]
+ set newbyte [binary format c $newval]
+ seek $f 100 start
+ puts -nonewline $f $newbyte
+ close $f
+ }
+ puts "\tSec002.d: Reopen modified databases"
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile1} ret]
+ error_check_good db:$testfile1 $stat 1
+ error_check_good db:$testfile1:fail \
+ [is_substr $ret "metadata page checksum error"] 1
+
+ set stat [catch {berkdb_open_noerr -chksum $testfile3} ret]
+ error_check_good db:$testfile3 $stat 1
+ error_check_good db:$testfile3:fail \
+ [is_substr $ret "metadata page checksum error"] 1
+
+ set stat [catch {berkdb_open_noerr $testfile4} db]
+ error_check_good db:$testfile4 $stat 0
+ error_check_good dbclose [$db close] 0
+
+ # Skip the remainder of the test for Windows platforms.
+ # Forcing the error which causes DB_RUNRECOVERY to be
+ # returned ends up leaving open files that cannot be removed.
+ if { $is_windows_test == 1 } {
+ cleanup $testdir NULL 1
+ puts "Skipping remainder of test for Windows"
+ return
+ }
+
+ puts "\tSec002.e: Replace root page in encrypted w/ encrypted"
+ set fid1 [open $testfile1 r+]
+ fconfigure $fid1 -translation binary
+ set fid2 [open $testfile2 r+]
+ fconfigure $fid2 -translation binary
+ seek $fid1 $pagesize start
+ seek $fid2 $pagesize start
+ fcopy $fid1 $fid2 -size $pagesize
+ close $fid1
+ close $fid2
+
+ set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ set stat [catch {$db get $key} ret]
+ error_check_good dbget $stat 1
+ error_check_good db:$testfile2:fail1 \
+ [is_substr $ret "checksum error"] 1
+ set stat [catch {$db close} ret]
+ error_check_good dbclose $stat 1
+ error_check_good db:$testfile2:fail2 [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ puts "\tSec002.f: Replace root page in encrypted w/ unencrypted"
+ set fid2 [open $testfile2 r+]
+ fconfigure $fid2 -translation binary
+ set fid4 [open $testfile4 r+]
+ fconfigure $fid4 -translation binary
+ seek $fid2 $pagesize start
+ seek $fid4 $pagesize start
+ fcopy $fid4 $fid2 -size $pagesize
+ close $fid4
+ close $fid2
+
+ set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ set stat [catch {$db get $key} ret]
+ error_check_good dbget $stat 1
+ error_check_good db:$testfile2:fail \
+ [is_substr $ret "checksum error"] 1
+ set stat [catch {$db close} ret]
+ error_check_good dbclose $stat 1
+ error_check_good db:$testfile2:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ cleanup $testdir NULL 1
+}
diff --git a/db-4.8.30/test/shelltest.tcl b/db-4.8.30/test/shelltest.tcl
new file mode 100644
index 0000000..a93c02a
--- /dev/null
+++ b/db-4.8.30/test/shelltest.tcl
@@ -0,0 +1,105 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST scr###
+# TEST The scr### directories are shell scripts that test a variety of
+# TEST things, including things about the distribution itself. These
+# TEST tests won't run on most systems, so don't even try to run them.
+#
+# shelltest.tcl:
+# Code to run shell script tests, to incorporate Java, C++,
+# example compilation, etc. test scripts into the Tcl framework.
+proc shelltest {{ run_one 0 } { xml 0 }} {
+ source ./include.tcl
+ global shelltest_list
+ global xmlshelltest_list
+
+ set SH /bin/sh
+ if { [file executable $SH] != 1 } {
+ puts "Shell tests require valid shell /bin/sh: not found."
+ puts "Skipping shell tests."
+ return 0
+ }
+
+ if { $xml == 1 } {
+ set shelltest_list $xmlshelltest_list
+ }
+
+ if { $run_one == 0 } {
+ puts "Running shell script tests..."
+
+ foreach testpair $shelltest_list {
+ set dir [lindex $testpair 0]
+ set test [lindex $testpair 1]
+
+ env_cleanup $testdir
+ shelltest_copy $test_path/$dir $testdir
+ shelltest_run $SH $dir $test $testdir
+ }
+ } else {
+ set run_one [expr $run_one - 1];
+ set dir [lindex [lindex $shelltest_list $run_one] 0]
+ set test [lindex [lindex $shelltest_list $run_one] 1]
+
+ env_cleanup $testdir
+ shelltest_copy $test_path/$dir $testdir
+ shelltest_run $SH $dir $test $testdir
+ }
+}
+
+proc shelltest_copy { fromdir todir } {
+ set globall [glob $fromdir/*]
+
+ foreach f $globall {
+ file copy $f $todir/
+ }
+}
+
+proc shelltest_run { sh srcdir test testdir } {
+ puts "Running shell script $srcdir ($test)..."
+
+ set ret [catch {exec $sh -c "cd $testdir && sh $test" >&@ stdout} res]
+
+ if { $ret != 0 } {
+ puts "FAIL: shell test $srcdir/$test exited abnormally"
+ }
+}
+
+proc scr001 {} { shelltest 1 }
+proc scr002 {} { shelltest 2 }
+proc scr003 {} { shelltest 3 }
+proc scr004 {} { shelltest 4 }
+proc scr005 {} { shelltest 5 }
+proc scr006 {} { shelltest 6 }
+proc scr007 {} { shelltest 7 }
+proc scr008 {} { shelltest 8 }
+proc scr009 {} { shelltest 9 }
+proc scr010 {} { shelltest 10 }
+proc scr011 {} { shelltest 11 }
+proc scr012 {} { shelltest 12 }
+proc scr013 {} { shelltest 13 }
+proc scr014 {} { shelltest 14 }
+proc scr015 {} { shelltest 15 }
+proc scr016 {} { shelltest 16 }
+proc scr017 {} { shelltest 17 }
+proc scr018 {} { shelltest 18 }
+proc scr019 {} { shelltest 19 }
+proc scr020 {} { shelltest 20 }
+proc scr021 {} { shelltest 21 }
+proc scr022 {} { shelltest 22 }
+proc scr023 {} { shelltest 23 }
+proc scr024 {} { shelltest 24 }
+proc scr025 {} { shelltest 25 }
+proc scr026 {} { shelltest 26 }
+proc scr027 {} { shelltest 27 }
+proc scr028 {} { shelltest 28 }
+proc scr029 {} { shelltest 29 }
+proc scr030 {} { shelltest 30 }
+proc scr031 {} { shelltest 31 }
+proc scr032 {} { shelltest 32 }
+proc scr033 {} { shelltest 33 }
+proc scr034 {} { shelltest 34 }
+proc scr035 {} { shelltest 35 }
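+
+# Hypothetical usage sketch (assumes the standard shelltest_list sourced by
+# the test framework): each scr### wrapper above passes a 1-based index to
+# shelltest, which converts it to a 0-based offset into shelltest_list.
+#
+#	% source ./test.tcl
+#	% shelltest		;# run every shell-script test in the list
+#	% scr003		;# run only the third entry, i.e. shelltest 3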
diff --git a/db-4.8.30/test/si001.tcl b/db-4.8.30/test/si001.tcl
new file mode 100644
index 0000000..8af6f63
--- /dev/null
+++ b/db-4.8.30/test/si001.tcl
@@ -0,0 +1,282 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST si001
+# TEST Secondary index put/delete with lorder test
+# TEST
+# TEST Put data in primary db and check that pget on secondary
+# TEST index finds the right entries. Alter the primary in the
+# TEST following ways, checking for correct data each time:
+# TEST Overwrite data in primary database.
+# TEST Delete half of entries through primary.
+# TEST Delete half of remaining entries through secondary.
+# TEST Append data (for record-based primaries only).
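+#
+# Illustration only: the callbacks handed to "$pdb associate" in this test
+# come from callback_n in the shared test utilities.  As the secondary
+# delete loop below shows, such a callback is invoked with the primary key
+# and datum and returns the secondary key under which the record is indexed.
+# A minimal, hypothetical callback of that shape (not used by any test):
+proc si001_example_skey_callback { pkey pdata } {
+	# Index each primary record by the first character of its datum.
+	return [string index $pdata 0]
+}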
+proc si001 { methods {nentries 200} {tnum "001"} args } {
+ source ./include.tcl
+ global dict nsecondaries
+ global default_pagesize
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ if [big_endian] {
+ set nativeargs " -lorder 4321"
+ set swappedargs " -lorder 1234"
+ } else {
+ set swappedargs " -lorder 4321"
+ set nativeargs " -lorder 1234"
+ }
+ set argtypes "{$nativeargs} {$swappedargs}"
+ set pomethod [convert_method $pmethod]
+
+ # Renumbering recno databases can't be used as primaries.
+ if { [is_rrecno $pmethod] == 1 } {
+ puts "Skipping si$tnum for method $pmethod"
+ return
+ }
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method (for btree or hash)
+ # and a standard number of secondaries. If primary is not
+ # btree or hash, force secondaries to be one btree, one hash.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ if { [is_btree $pmethod] || [is_hash $pmethod] } {
+ lappend methods $pmethod
+ } else {
+ if { [expr $i % 2] == 0 } {
+ lappend methods "-btree"
+ } else {
+ lappend methods "-hash"
+ }
+ }
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ set mutexargs " -mutex_set_max 10000 "
+ if { $default_pagesize <= 2048 } {
+ set mutexargs "-mutex_set_max 40000 "
+ }
+ # If we are given an env, use it. Otherwise, open one.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ env_cleanup $testdir
+ set cacheargs " -cachesize {0 4194304 1} "
+ set env [eval {berkdb_env} -create \
+ $cacheargs $mutexargs -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set envflags [$env get_open_flags]
+ if { [lsearch -exact $envflags "-thread"] != -1 &&\
+ [is_queue $pmethod] == 1 } {
+ puts "Skipping si$tnum for threaded env"
+ return
+ }
+ set testdir [get_home $env]
+ }
+
+ set pname "primary$tnum.db"
+ set snamebase "secondary$tnum"
+
+ foreach pbyteargs $argtypes {
+ foreach sbyteargs $argtypes {
+ if { $pbyteargs == $nativeargs } {
+ puts "Si$tnum: Using native\
+ byteorder $nativeargs for primary."
+ } else {
+ puts "Si$tnum: Using swapped\
+ byteorder $swappedargs for primary."
+ }
+ if { $sbyteargs == $nativeargs } {
+ puts "Si$tnum: Using native\
+ byteorder $nativeargs for secondaries."
+ } else {
+ puts "Si$tnum: Using swapped\
+ byteorder $swappedargs for secondaries."
+ }
+
+ puts "si$tnum\
+ \{\[ list $pmethod $methods \]\} $nentries"
+ cleanup $testdir $env
+
+ # Open primary.
+ set pdb [eval {berkdb_open -create -env} $env \
+ $pomethod $pargs $pbyteargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } \
+ { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] \
+ $sbyteargs $snamebase.$i.db]
+ error_check_good\
+ second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSi$tnum.a: Put loop"
+ set did [open $dict]
+ for { set n 0 } \
+ { [gets $did str] != -1 && $n < $nentries } \
+ { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put}\
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+
+ check_secondaries\
+ $pdb $sdbs $nentries keys data "Si$tnum.a"
+
+ puts "\tSi$tnum.b: Put/overwrite loop"
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+ set ret [eval {$pdb put}\
+ {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ }
+ check_secondaries\
+ $pdb $sdbs $nentries keys data "Si$tnum.b"
+
+ # Delete the second half of the entries through
+ # the primary. We do the second half so we can
+ # just pass keys(0 ... n/2) to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSi$tnum.c:\
+ Primary delete loop: deleting $half entries"
+ for { set n $half } { $n < $nentries } { incr n } {
+ set ret [$pdb del $keys($n)]
+ error_check_good pdel($n) $ret 0
+ }
+ check_secondaries\
+ $pdb $sdbs $half keys data "Si$tnum.c"
+
+ # Delete half of what's left through
+ # the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSi$tnum.d:\
+ Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n)\
+ [pad_data $pmethod $data($n)]]
+ set ret [$sdb del $skey]
+ error_check_good sdel($n) $ret 0
+ }
+ check_secondaries\
+ $pdb $sdbs $quar keys data "Si$tnum.d"
+ set left $quar
+
+ # For queue and recno only, test append, adding back
+ # a quarter of the original number of entries.
+ if { [is_record_based $pmethod] == 1 } {
+ set did [open $dict]
+ puts "\tSi$tnum.e:\
+ Append loop: append $quar entries"
+ for { set n 0 } { $n < $nentries } { incr n } {
+ # Skip over the dictionary entries
+ # we've already used.
+ gets $did str
+ }
+ for { set n $quar } \
+ { [gets $did str] != -1 && $n < $half } \
+ { incr n } {
+ set key [expr $n + 1]
+ set datum $str
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+
+ check_secondaries\
+ $pdb $sdbs $half keys data "Si$tnum.e"
+ set left $half
+ }
+
+
+ puts "\tSi$tnum.f:\
+ Truncate primary, check secondaries are empty."
+ error_check_good truncate [$pdb truncate] $left
+ foreach sdb $sdbs {
+ set scursor [$sdb cursor]
+ error_check_good\
+ db_cursor [is_substr $scursor $sdb] 1
+ set ret [$scursor get -first]
+ error_check_good\
+ sec_empty [string length $ret] 0
+ error_check_good cursor_close [$scursor close] 0
+ }
+
+
+ puts "\tSi$tnum.g: Closing/disassociating primary first"
+ error_check_good primary_close [$pdb close] 0
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+
+ # Don't close the env if this test was given one.
+ # Skip the test of truncating the secondary since
+ # we can't close and reopen the outside env.
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+
+ # Reopen with _noerr for test of
+ # truncate secondary.
+ puts "\tSi$tnum.h:\
+ Truncate secondary (should fail)"
+
+ set env [eval {berkdb_env_noerr}\
+ -create $mutexargs -home $testdir]
+ error_check_good\
+ env_open [is_valid_env $env] TRUE
+
+ set pdb [eval {berkdb_open_noerr -create -env}\
+ $env $pomethod $pargs $pname]
+ set sdb [eval {berkdb_open_noerr -create -env}\
+ $env [lindex $omethods 0]\
+ [lindex $argses 0] $snamebase.0.db ]
+ $pdb associate [callback_n 0] $sdb
+
+ set ret [catch {$sdb truncate} ret]
+ error_check_good trunc_secondary $ret 1
+
+ error_check_good primary_close [$pdb close] 0
+ error_check_good secondary_close [$sdb close] 0
+ }
+ }
+ }
+ # If this test made the last env, close it.
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+ }
+}
diff --git a/db-4.8.30/test/si002.tcl b/db-4.8.30/test/si002.tcl
new file mode 100644
index 0000000..b088c33
--- /dev/null
+++ b/db-4.8.30/test/si002.tcl
@@ -0,0 +1,235 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST si002
+# TEST Basic cursor-based secondary index put/delete test
+# TEST
+# TEST Cursor put data in primary db and check that pget
+# TEST on secondary index finds the right entries.
+# TEST Open and use a second cursor to exercise the cursor
+# TEST comparison API on secondaries.
+# TEST Overwrite while walking primary, check pget again.
+# TEST Overwrite while walking secondary (use c_pget), check
+# TEST pget again.
+# TEST Cursor delete half of entries through primary, check.
+# TEST Cursor delete half of remainder through secondary, check.
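+#
+# Note (illustration only): a secondary cursor "pget", as used below, returns
+# entries of the form {{skey pkey pdata}}, which is why the loops take
+# element 1 for the primary key and element 2 for the primary datum:
+#
+#	set dbt [$sdbc pget -first]
+#	set pkey [lindex [lindex $dbt 0] 1]
+#	set pdatum [lindex [lindex $dbt 0] 2]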
+proc si002 { methods {nentries 200} {tnum "002"} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Renumbering recno databases can't be used as primaries.
+ if { [is_rrecno $pmethod] == 1 } {
+ puts "Skipping si$tnum for method $pmethod"
+ return
+ }
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method (for btree or hash)
+ # and a standard number of secondaries. If primary is not
+ # btree or hash, force secondaries to be one btree, one hash.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ if { [is_btree $pmethod] || [is_hash $pmethod] } {
+ lappend methods $pmethod
+ } else {
+ if { [expr $i % 2] == 0 } {
+ lappend methods "-btree"
+ } else {
+ lappend methods "-hash"
+ }
+ }
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ # If we are given an env, use it. Otherwise, open one.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ env_cleanup $testdir
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set envflags [$env get_open_flags]
+ if { [lsearch -exact $envflags "-thread"] != -1 } {
+ puts "Skipping si$tnum for threaded env"
+ return
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries"
+ cleanup $testdir $env
+
+ set pname "primary$tnum.db"
+ set snamebase "secondary$tnum"
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ set did [open $dict]
+
+ # Populate with a cursor, exercising keyfirst/keylast.
+ puts "\tSi$tnum.a: Cursor put (-keyfirst/-keylast) loop"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+
+ set ns($key) $n
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ if { $n % 2 == 0 } {
+ set pflag " -keyfirst "
+ } else {
+ set pflag " -keylast "
+ }
+
+ set ret [eval {$pdbc put} $pflag \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+
+ close $did
+ check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a"
+
+ puts "\tSi$tnum.b: Cursor put overwrite (-current) loop"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+ set newd $datum.$key
+ set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]]
+ error_check_good put_overwrite($key) $ret 0
+ set data($ns($key)) [pad_data $pmethod $newd]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.b"
+
+ puts "\tSi$tnum.c: Secondary c_pget/primary put overwrite loop"
+
+ # We walk the first secondary, then put-overwrite each primary key/data
+ # pair we find. This doubles as a DBC->c_pget test.
+ # We also test the cursor comparison API on secondaries.
+ #
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ set sdbc2 [$sdb cursor]
+ error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE
+ for { set dbt [$sdbc pget -first]; set dbt2 [$sdbc2 pget -first] }\
+ { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next]; set dbt2 [$sdbc2 pget -next] } {
+
+ # Test the cursor comparison API for secondaries
+ # before we overwrite. First they should match;
+ # push one cursor forward, they should not match;
+ # push it back again before the next get.
+ #
+ error_check_good cursor_cmp [$sdbc cmp $sdbc2] 0
+ set ret [$sdbc2 get -next]
+
+ # If the second cursor tried to walk past the last item,
+ # this can't work, so we skip it.
+ if { [llength $ret] > 0 } {
+ error_check_bad cursor_cmp_bad [$sdbc cmp $sdbc2] 0
+ set ret [$sdbc2 get -prev]
+ }
+
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdatum [lindex [lindex $dbt 0] 2]
+
+ # Extended entries will be showing up underneath us, in
+ # unpredictable places. Keep track of which pkeys
+ # we've extended, and don't extend them repeatedly.
+ if { [info exists pkeys_done($pkey)] == 1 } {
+ continue
+ } else {
+ set pkeys_done($pkey) 1
+ }
+
+ set newd $pdatum.[string range $pdatum 0 2]
+ set ret [eval {$pdb put} {$pkey [chop_data $pmethod $newd]}]
+ error_check_good pdb_put($pkey) $ret 0
+ set data($ns($pkey)) [pad_data $pmethod $newd]
+
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ error_check_good sdbc2_close [$sdbc2 close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.c"
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSi$tnum.d:\
+ Primary cursor delete loop: deleting $half entries"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set dbt [$pdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } {
+ error_check_good pdbc_del [$pdbc del] 0
+ set dbt [$pdbc get -next]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $half "Si$tnum.d"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSi$tnum.e:\
+ Secondary cursor delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ set dbt [$sdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } {
+ error_check_good sdbc_del [$sdbc del] 0
+ set dbt [$sdbc get -next]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $quar "Si$tnum.e"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+
+ # Close the env if it was created within this test.
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+ }
+}
diff --git a/db-4.8.30/test/si003.tcl b/db-4.8.30/test/si003.tcl
new file mode 100644
index 0000000..e235066
--- /dev/null
+++ b/db-4.8.30/test/si003.tcl
@@ -0,0 +1,179 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST si003
+# TEST si001 with secondaries created and closed mid-test
+# TEST Basic secondary index put/delete test with secondaries
+# TEST created mid-test.
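+#
+# The secondaries in this test are opened only after the primary has been
+# populated, so they are associated with "-create", which asks Berkeley DB
+# to build each secondary index from the records already in the primary.
+# A minimal sketch of the pattern used below (assuming an open environment
+# $env and primary $pdb):
+#
+#	set sdb [berkdb_open -create -env $env -btree second.db]
+#	$pdb associate -create [callback_n 0] $sdb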
+proc si003 { methods {nentries 200} {tnum "003"} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # There's no reason to run this test on large lists.
+ if { $nentries > 1000 } {
+ puts "Skipping si003 for large lists (over 1000 items)"
+ return
+ }
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Renumbering recno databases can't be used as primaries.
+ if { [is_rrecno $pmethod] == 1 } {
+ puts "Skipping si$tnum for method $pmethod"
+ return
+ }
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method (for btree or hash)
+ # and a standard number of secondaries. If primary is not
+ # btree or hash, force secondaries to be one btree, one hash.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ if { [is_btree $pmethod] || [is_hash $pmethod] } {
+ lappend methods $pmethod
+ } else {
+ if { [expr $i % 2] == 0 } {
+ lappend methods "-btree"
+ } else {
+ lappend methods "-hash"
+ }
+ }
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ # If we are given an env, use it. Otherwise, open one.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ env_cleanup $testdir
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set envflags [$env get_open_flags]
+ if { [lsearch -exact $envflags "-thread"] != -1 &&\
+ [is_queue $pmethod] == 1 } {
+ puts "Skipping si$tnum for threaded env with queue"
+ return
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries"
+ cleanup $testdir $env
+
+ set pname "primary$tnum.db"
+ set snamebase "secondary$tnum"
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ puts -nonewline "\tSi$tnum.a: Put loop ... "
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "opening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a"
+
+ puts -nonewline "\tSi$tnum.b: Put/overwrite loop ... "
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+ set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ }
+
+ # Close the secondaries again.
+ puts "closing secondaries."
+ for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \
+ { set sdb [lindex $sdbs end] } {
+ error_check_good second_close($sdb) [$sdb close] 0
+ set sdbs [lrange $sdbs 0 end-1]
+ check_secondaries \
+ $pdb $sdbs $nentries keys data "Si$tnum.b"
+ }
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts -nonewline \
+ "\tSi$tnum.c: Primary delete loop: deleting $half entries ..."
+ for { set n $half } { $n < $nentries } { incr n } {
+ set ret [$pdb del $keys($n)]
+ error_check_good pdel($n) $ret 0
+ }
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] \
+ $snamebase.r2.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $half keys data "Si$tnum.c"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSi$tnum.d: Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+ set ret [$sdb del $skey]
+ error_check_good sdel($n) $ret 0
+ }
+ check_secondaries $pdb $sdbs $quar keys data "Si$tnum.d"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+
+ # Close the env if it was created within this test.
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+ }
+}
diff --git a/db-4.8.30/test/si004.tcl b/db-4.8.30/test/si004.tcl
new file mode 100644
index 0000000..e58bef7
--- /dev/null
+++ b/db-4.8.30/test/si004.tcl
@@ -0,0 +1,233 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST si004
+# TEST si002 with secondaries created and closed mid-test
+# TEST Basic cursor-based secondary index put/delete test, with
+# TEST secondaries created mid-test.
+proc si004 { methods {nentries 200} {tnum "004"} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # There's no reason to run this test on large lists.
+ if { $nentries > 1000 } {
+ puts "Skipping si004 for large lists (over 1000 items)."
+ return
+ }
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Renumbering recno databases can't be used as primaries.
+ if { [is_rrecno $pmethod] == 1 } {
+ puts "Skipping si$tnum for method $pmethod"
+ return
+ }
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method (for btree or hash)
+ # and a standard number of secondaries. If primary is not
+ # btree or hash, force secondaries to be one btree, one hash.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ if { [is_btree $pmethod] || [is_hash $pmethod] } {
+ lappend methods $pmethod
+ } else {
+ if { [expr $i % 2] == 0 } {
+ lappend methods "-btree"
+ } else {
+ lappend methods "-hash"
+ }
+ }
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ # If we are given an env, use it. Otherwise, open one.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ env_cleanup $testdir
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set envflags [$env get_open_flags]
+ if { [lsearch -exact $envflags "-thread"] != -1 } {
+ puts "Skipping si$tnum for threaded env"
+ return
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries"
+ cleanup $testdir $env
+
+ set pname "primary$tnum.db"
+ set snamebase "secondary$tnum"
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Populate with a cursor put, exercising keyfirst/keylast.
+ set did [open $dict]
+ puts -nonewline \
+ "\tSi$tnum.a: Cursor put (-keyfirst/-keylast) loop ... "
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set n 0 } \
+ { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+
+ set ns($key) $n
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ if { $n % 2 == 0 } {
+ set pflag " -keyfirst "
+ } else {
+ set pflag " -keylast "
+ }
+
+ set ret [eval {$pdbc put} $pflag \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ close $did
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a"
+
+ puts "\tSi$tnum.b: Cursor put overwrite (-current) loop"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+ set newd $datum.$key
+ set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]]
+ error_check_good put_overwrite($key) $ret 0
+ set data($ns($key)) [pad_data $pmethod $newd]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.b"
+
+ puts -nonewline "\tSi$tnum.c:\
+ Secondary c_pget/primary put overwrite loop ... "
+ # We walk the first secondary, then put-overwrite each primary key/data
+ # pair we find. This doubles as a DBC->c_pget test.
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE
+ for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next] } {
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdatum [lindex [lindex $dbt 0] 2]
+
+ # Extended entries will be showing up underneath us, in
+ # unpredictable places. Keep track of which pkeys
+ # we've extended, and don't extend them repeatedly.
+ if { [info exists pkeys_done($pkey)] == 1 } {
+ continue
+ } else {
+ set pkeys_done($pkey) 1
+ }
+
+ set newd $pdatum.[string range $pdatum 0 2]
+ set ret [eval {$pdb put} {$pkey [chop_data $pmethod $newd]}]
+ error_check_good pdb_put($pkey) $ret 0
+ set data($ns($pkey)) [pad_data $pmethod $newd]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+
+ # Close the secondaries again.
+ puts "\n\t\tclosing secondaries."
+ for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \
+ { set sdb [lindex $sdbs end] } {
+ error_check_good second_close($sdb) [$sdb close] 0
+ set sdbs [lrange $sdbs 0 end-1]
+ check_secondaries \
+ $pdb $sdbs $nentries keys data "Si$tnum.c"
+ }
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts -nonewline "\tSi$tnum.d:\
+ Primary cursor delete loop: deleting $half entries ... "
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set dbt [$pdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } {
+ error_check_good pdbc_del [$pdbc del] 0
+ set dbt [$pdbc get -next]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] \
+ $snamebase.r2.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ cursor_check_secondaries $pdb $sdbs $half "Si$tnum.d"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSi$tnum.e:\
+ Secondary cursor delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ set dbt [$sdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } {
+ error_check_good sdbc_del [$sdbc del] 0
+ set dbt [$sdbc get -next]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $quar "Si$tnum.e"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+
+ # Close the env if it was created within this test.
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+ }
+}
diff --git a/db-4.8.30/test/si005.tcl b/db-4.8.30/test/si005.tcl
new file mode 100644
index 0000000..63991fc
--- /dev/null
+++ b/db-4.8.30/test/si005.tcl
@@ -0,0 +1,170 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST si005
+# TEST Basic secondary index put/delete test with transactions
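+#
+# A minimal sketch (illustration only) of the per-operation transaction
+# pattern this test uses, assuming a transactional environment $env and an
+# open primary $pdb:
+#
+#	set txn [$env txn]
+#	$pdb put -txn $txn $key $datum
+#	$txn commit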
+proc si005 { methods {nentries 200} {tnum "005"} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Renumbering recno databases can't be used as primaries.
+ if { [is_rrecno $pmethod] == 1 } {
+ puts "Skipping si$tnum for method $pmethod"
+ return
+ }
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method (for btree or hash)
+ # and a standard number of secondaries. If primary is not
+ # btree or hash, force secondaries to be one btree, one hash.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ if { [is_btree $pmethod] || [is_hash $pmethod] } {
+ lappend methods $pmethod
+ } else {
+ if { [expr $i % 2] == 0 } {
+ lappend methods "-btree"
+ } else {
+ lappend methods "-hash"
+ }
+ }
+ }
+ }
+
+ # Since this is a transaction test, don't allow nentries to be large.
+ if { $nentries > 1000 } {
+ puts "Skipping si005 for large lists (over 1000 items)."
+ return
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ # If we are given an env, use it. Otherwise, open one.
+ set eindex [lsearch -exact $args "-env"]
+ set txnenv 0
+ if { $eindex == -1 } {
+ env_cleanup $testdir
+ set env [berkdb_env -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append pargs " -auto_commit "
+ append argses " -auto_commit "
+ } else {
+ puts "Skipping si$tnum for non-transactional env."
+ return
+ }
+ set testdir [get_home $env]
+ }
+
+ cleanup $testdir $env
+ puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries"
+ puts "\twith transactions"
+
+ set pname "primary$tnum.db"
+ set snamebase "secondary$tnum"
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -auto_commit -env} $env $pomethod \
+ $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -auto_commit -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSi$tnum.a: Put loop"
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set txn [$env txn]
+ set ret [eval {$pdb put} -txn $txn \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ close $did
+ check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a"
+
+ puts "\tSi$tnum.b: Put/overwrite loop"
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+
+ set txn [$env txn]
+ set ret [eval {$pdb put} -txn $txn \
+ {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.b"
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSi$tnum.c: Primary delete loop: deleting $half entries"
+ for { set n $half } { $n < $nentries } { incr n } {
+ set txn [$env txn]
+ set ret [$pdb del -txn $txn $keys($n)]
+ error_check_good pdel($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $half keys data "Si$tnum.c"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSi$tnum.d: Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+ set txn [$env txn]
+ set ret [$sdb del -txn $txn $skey]
+ error_check_good sdel($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $quar keys data "Si$tnum.d"
+
+ puts "\tSi$tnum.e: Closing/disassociating primary first"
+ error_check_good primary_close [$pdb close] 0
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+
+ # Close the env if it was created within this test.
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+ }
+ return
+}
diff --git a/db-4.8.30/test/si006.tcl b/db-4.8.30/test/si006.tcl
new file mode 100644
index 0000000..c468273
--- /dev/null
+++ b/db-4.8.30/test/si006.tcl
@@ -0,0 +1,186 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST si006
+# TEST
+# TEST Test -immutable_key interface.
+# TEST
+# TEST DB_IMMUTABLE_KEY is an optimization to be used when a
+# TEST secondary key will not be changed. It does not prevent
+# TEST a deliberate change to the secondary key, it just does not
+# TEST propagate that change when it is made to the primary.
+# TEST This test verifies that a change to the primary is propagated
+# TEST to the secondary or not as specified by -immutable_key.
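+#
+# A minimal sketch (illustration only) of the two association styles this
+# test contrasts, assuming an open primary $pdb and secondaries $sdb1/$sdb2:
+#
+#	$pdb associate [callback_n 0] $sdb1
+#	$pdb associate -immutable_key [callback_n 1] $sdb2
+#
+# With -immutable_key, a change made through the primary that would alter
+# the secondary key is not propagated to that secondary.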
+
+proc si006 { methods {nentries 200} {tnum "006"} args } {
+ source ./include.tcl
+ global dict
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Renumbering recno databases can't be used as primaries.
+ if { [is_rrecno $pmethod] == 1 } {
+ puts "Skipping si$tnum for method $pmethod"
+ return
+ }
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method (for btree or hash)
+ # and a standard number of secondaries. If primary is not
+ # btree or hash, force secondaries to be one btree, one hash.
+ set methods [lrange $methods 1 end]
+ set nsecondaries 2
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ if { [is_btree $pmethod] || [is_hash $pmethod] } {
+ lappend methods $pmethod
+ } else {
+ if { [expr $i % 2] == 0 } {
+ lappend methods "-btree"
+ } else {
+ lappend methods "-hash"
+ }
+ }
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ # If we are given an env, use it. Otherwise, open one.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ env_cleanup $testdir
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set envflags [$env get_open_flags]
+ if { [lsearch -exact $envflags "-thread"] != -1 &&\
+ [is_queue $pmethod] == 1 } {
+ puts "Skipping si$tnum for threaded env"
+ return
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "si$tnum \{\[ list $pmethod $methods \]\} $nentries"
+ cleanup $testdir $env
+
+ set pname "primary$tnum.db"
+ set snamebase "secondary$tnum"
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries, without -immutable_key.
+ puts "\tSi$tnum.a: Open primary and secondary databases and associate."
+ set sdbs {}
+
+ set sdb1 [eval {berkdb_open -create -env} $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.1.db]
+ error_check_good open_sdb1 [is_valid_db $sdb1] TRUE
+ error_check_good sdb1_associate \
+ [$pdb associate [callback_n 0] $sdb1] 0
+ lappend sdbs $sdb1
+
+ set sdb2 [eval {berkdb_open -create -env} $env \
+ [lindex $omethods 1] [lindex $argses 1] $snamebase.2.db]
+ error_check_good open_sdb2 [is_valid_db $sdb2] TRUE
+ error_check_good sdb2_associate \
+ [$pdb associate [callback_n 1] $sdb2] 0
+ lappend sdbs $sdb2
+
+ puts "\tSi$tnum.b: Put loop on primary database."
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+
+ puts "\tSi$tnum.c: Check secondaries."
+ check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.c"
+
+ puts "\tSi$tnum.d: Closing/disassociating primary first"
+ error_check_good primary_close [$pdb close] 0
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+
+ puts "\tSi$tnum.e: Reopen databases."
+ # Reopen the primary.
+ set pdb [eval {berkdb_open -env} $env $pname]
+ error_check_good primary_reopen [is_valid_db $pdb] TRUE
+
+ # Reopen and associate secondary without -immutable_key.
+ set mutable {}
+ set sdb1 [eval {berkdb_open -create -env} $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.1.db]
+ error_check_good open_sdb1 [is_valid_db $sdb1] TRUE
+ error_check_good sdb1_associate \
+ [$pdb associate [callback_n 0] $sdb1] 0
+	lappend mutable $sdb1
+
+ # Reopen and associate second secondary with -immutable_key.
+ set immutable {}
+ set sdb2 [eval {berkdb_open -env} $env \
+ [lindex $omethods 1] [lindex $argses 1] $snamebase.2.db]
+ error_check_good reopen_sdb2 [is_valid_db $sdb2] TRUE
+ error_check_good sdb2_associate \
+ [$pdb associate -immutable_key [callback_n 1] $sdb2] 0
+ lappend immutable $sdb2
+
+ # Update primary. This should write to sdb1, but not sdb2.
+ puts "\tSi$tnum.f: Put loop on primary database."
+ set str "OVERWRITTEN"
+ for { set n 0 } { $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ } else {
+ set key $keys($n)
+ }
+ set datum $str.$n
+ set data($n) [pad_data $pmethod $datum]
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+
+ puts "\tSi$tnum.g: Check secondaries without -immutable_key."
+ check_secondaries $pdb $mutable $nentries keys data "Si$tnum.g"
+
+ puts "\tSi$tnum.h: Check secondaries with -immutable_key."
+ if { [catch {check_secondaries \
+ $pdb $immutable $nentries keys data "Si$tnum.h"} res] != 1 } {
+ puts "FAIL: Immutable secondary key was changed."
+ }
+
+ puts "\tSi$tnum.i: Closing/disassociating primary first"
+ error_check_good primary_close [$pdb close] 0
+ error_check_good secondary1_close [$sdb1 close] 0
+ error_check_good secondary2_close [$sdb2 close] 0
+
+ # Don't close the env if this test was given one.
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+ }
+}
+
diff --git a/db-4.8.30/test/si007.tcl b/db-4.8.30/test/si007.tcl
new file mode 100644
index 0000000..7e1eb90
--- /dev/null
+++ b/db-4.8.30/test/si007.tcl
@@ -0,0 +1,188 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST si007
+# TEST Secondary index put/delete with lorder test
+# TEST
+# TEST This test is the same as si001 with the exception
+# TEST that we create and populate the primary and THEN
+# TEST create the secondaries and associate them with -create.
+
+proc si007 { methods {nentries 10} {tnum "007"} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ if [big_endian] {
+ set nativeargs " -lorder 4321"
+ set swappedargs " -lorder 1234"
+ } else {
+ set swappedargs " -lorder 4321"
+ set nativeargs " -lorder 1234"
+ }
+ set argtypes "{$nativeargs} {$swappedargs}"
+ set pomethod [convert_method $pmethod]
+
+ # Renumbering recno databases can't be used as primaries.
+ if { [is_rrecno $pmethod] == 1 } {
+ puts "Skipping si$tnum for method $pmethod"
+ return
+ }
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method (for btree or hash)
+ # and a standard number of secondaries. If primary is not
+ # btree or hash, force secondaries to be one btree, one hash.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ if { [is_btree $pmethod] || [is_hash $pmethod] } {
+ lappend methods $pmethod
+ } else {
+ if { [expr $i % 2] == 0 } {
+ lappend methods "-btree"
+ } else {
+ lappend methods "-hash"
+ }
+ }
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ # If we are given an env, use it. Otherwise, open one.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ env_cleanup $testdir
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set envflags [$env get_open_flags]
+ if { [lsearch -exact $envflags "-thread"] != -1 &&\
+ [is_queue $pmethod] == 1 } {
+ puts "Skipping si$tnum for threaded env"
+ return
+ }
+ set testdir [get_home $env]
+ }
+
+ set pname "primary$tnum.db"
+ set snamebase "secondary$tnum"
+
+ foreach pbyteargs $argtypes {
+ foreach sbyteargs $argtypes {
+ if { $pbyteargs == $nativeargs } {
+ puts "Si$tnum: Using native\
+ byteorder $nativeargs for primary."
+ } else {
+ puts "Si$tnum: Using swapped\
+ byteorder $swappedargs for primary."
+ }
+ if { $sbyteargs == $nativeargs } {
+ puts "Si$tnum: Using native\
+ byteorder $nativeargs for secondaries."
+ } else {
+ puts "Si$tnum: Using swapped\
+ byteorder $swappedargs for secondaries."
+ }
+
+ puts "si$tnum\
+ \{\[ list $pmethod $methods \]\} $nentries"
+ cleanup $testdir $env
+
+ # Open primary.
+ set pdb [eval {berkdb_open -create -env} $env \
+ $pomethod $pargs $pbyteargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ puts "\tSi$tnum.a: Populate primary."
+			# Open the dictionary and populate the primary
+			# before any secondaries exist.
+ set did [open $dict]
+ for { set n 0 } \
+ { [gets $did str] != -1 && $n < $nentries } \
+ { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put}\
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+
+ # Open and associate the secondaries, with -create.
+ puts "\tSi$tnum.b: Associate secondaries with -create."
+ set sdbs {}
+ for { set i 0 } \
+ { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] \
+ $sbyteargs $snamebase.$i.db]
+ error_check_good\
+ second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries\
+ $pdb $sdbs $nentries keys data "Si$tnum.c"
+
+ puts "\tSi$tnum.c: Closing/disassociating primary first"
+ error_check_good primary_close [$pdb close] 0
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+
+ # Don't close the env if this test was given one.
+ # Skip the test of truncating the secondary since
+ # we can't close and reopen the outside env.
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+
+ # Reopen with _noerr for test of
+ # truncate secondary.
+ puts "\tSi$tnum.h:\
+ Truncate secondary (should fail)"
+
+ set env [berkdb_env_noerr\
+ -create -home $testdir]
+ error_check_good\
+ env_open [is_valid_env $env] TRUE
+
+ set pdb [eval {berkdb_open_noerr -create -env}\
+ $env $pomethod $pargs $pname]
+ set sdb [eval {berkdb_open_noerr -create -env}\
+ $env [lindex $omethods 0]\
+ [lindex $argses 0] $snamebase.0.db ]
+ $pdb associate [callback_n 0] $sdb
+
+ set ret [catch {$sdb truncate} ret]
+ error_check_good trunc_secondary $ret 1
+
+ error_check_good primary_close [$pdb close] 0
+ error_check_good secondary_close [$sdb close] 0
+ }
+ }
+ }
+ # If this test made the last env, close it.
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+ }
+}
diff --git a/db-4.8.30/test/si008.tcl b/db-4.8.30/test/si008.tcl
new file mode 100644
index 0000000..2bba11a
--- /dev/null
+++ b/db-4.8.30/test/si008.tcl
@@ -0,0 +1,273 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST si008
+# TEST Secondary index put/delete with lorder test
+# TEST
+# TEST This test is the same as si001 except that we
+# TEST create the secondaries with different byte orders:
+# TEST one native, one swapped.
+
+proc si008 { methods {nentries 10} {tnum "008"} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ if [big_endian] {
+ set nativeargs " -lorder 4321"
+ set swappedargs " -lorder 1234"
+ } else {
+ set swappedargs " -lorder 4321"
+ set nativeargs " -lorder 1234"
+ }
+ set argtypes "{$nativeargs} {$swappedargs}"
+ set pomethod [convert_method $pmethod]
+
+ # Renumbering recno databases can't be used as primaries.
+ if { [is_rrecno $pmethod] == 1 } {
+ puts "Skipping si$tnum for method $pmethod"
+ return
+ }
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method (for btree or hash)
+ # and a standard number of secondaries. If primary is not
+ # btree or hash, force secondaries to be one btree, one hash.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ if { [is_btree $pmethod] || [is_hash $pmethod] } {
+ lappend methods $pmethod
+ } else {
+ if { [expr $i % 2] == 0 } {
+ lappend methods "-btree"
+ } else {
+ lappend methods "-hash"
+ }
+ }
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ # If we are given an env, use it. Otherwise, open one.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ env_cleanup $testdir
+ set cacheargs " -cachesize {0 1048576 1} "
+ set env [eval berkdb_env -create $cacheargs -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set envflags [$env get_open_flags]
+ if { [lsearch -exact $envflags "-thread"] != -1 &&\
+ [is_queue $pmethod] == 1 } {
+ puts "Skipping si$tnum for threaded env"
+ return
+ }
+ set testdir [get_home $env]
+ }
+
+ set pname "primary$tnum.db"
+ set snamebase "secondary$tnum"
+
+ foreach pbyteargs $argtypes {
+ if { $pbyteargs == $nativeargs } {
+ puts "Si$tnum: Using native\
+ byteorder $nativeargs for primary."
+ } else {
+ puts "Si$tnum: Using swapped\
+ byteorder $swappedargs for primary."
+ }
+
+ puts "si$tnum\
+ \{\[ list $pmethod $methods \]\} $nentries"
+ cleanup $testdir $env
+
+ # Open primary.
+ set pdb [eval {berkdb_open -create -env} $env \
+ $pomethod $pargs $pbyteargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ if { [expr $i % 2] == 0 } {
+ set sbyteargs $nativeargs
+ } else {
+ set sbyteargs $swappedargs
+ }
+
+ if { $sbyteargs == $nativeargs } {
+ puts "Si$tnum: Using native byteorder\
+ $nativeargs for secondary $i."
+ } else {
+ puts "Si$tnum: Using swapped byteorder\
+ $swappedargs for secondary $i."
+ }
+
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] \
+ $sbyteargs $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSi$tnum.a: Put loop"
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put}\
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+
+ check_secondaries $pdb $sdbs $nentries keys data "Si$tnum.a"
+
+ puts "\tSi$tnum.b: Put/overwrite loop"
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+ set ret [eval {$pdb put}\
+ {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ }
+ check_secondaries\
+ $pdb $sdbs $nentries keys data "Si$tnum.b"
+
+ # Delete the second half of the entries through
+ # the primary. We do the second half so we can
+ # just pass keys(0 ... n/2) to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSi$tnum.c:\
+ Primary delete loop: deleting $half entries"
+ for { set n $half } { $n < $nentries } { incr n } {
+ set ret [$pdb del $keys($n)]
+ error_check_good pdel($n) $ret 0
+ }
+ check_secondaries\
+ $pdb $sdbs $half keys data "Si$tnum.c"
+
+ # Delete half of what's left through
+ # the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSi$tnum.d:\
+ Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n)\
+ [pad_data $pmethod $data($n)]]
+ set ret [$sdb del $skey]
+ error_check_good sdel($n) $ret 0
+ }
+ check_secondaries\
+ $pdb $sdbs $quar keys data "Si$tnum.d"
+ set left $quar
+
+ # For queue and recno only, test append, adding back
+ # a quarter of the original number of entries.
+ if { [is_record_based $pmethod] == 1 } {
+ set did [open $dict]
+ puts "\tSi$tnum.e:\
+ Append loop: append $quar entries"
+ for { set n 0 } { $n < $nentries } { incr n } {
+ # Skip over dictionary entries we've
+ # already used.
+ gets $did str
+ }
+ for { set n $quar } \
+ { [gets $did str] != -1 && $n < $half } \
+ { incr n } {
+ set key [expr $n + 1]
+ set datum $str
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+
+ check_secondaries\
+ $pdb $sdbs $half keys data "Si$tnum.e"
+ set left $half
+ }
+
+ puts "\tSi$tnum.f:\
+ Truncate primary, check secondaries are empty."
+ error_check_good truncate [$pdb truncate] $left
+ foreach sdb $sdbs {
+ set scursor [$sdb cursor]
+ error_check_good\
+ db_cursor [is_substr $scursor $sdb] 1
+ set ret [$scursor get -first]
+ error_check_good\
+ sec_empty [string length $ret] 0
+ error_check_good cursor_close [$scursor close] 0
+ }
+
+
+ puts "\tSi$tnum.g: Closing/disassociating primary first"
+ error_check_good primary_close [$pdb close] 0
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+
+ # Don't close the env if this test was given one.
+ # Skip the test of truncating the secondary since
+ # we can't close and reopen the outside env.
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+
+ # Reopen with _noerr for test of
+ # truncate secondary.
+ puts "\tSi$tnum.h:\
+ Truncate secondary (should fail)"
+
+ set env [berkdb_env_noerr\
+ -create -home $testdir]
+ error_check_good\
+ env_open [is_valid_env $env] TRUE
+
+ set pdb [eval {berkdb_open_noerr -create -env}\
+ $env $pomethod $pargs $pname]
+ set sdb [eval {berkdb_open_noerr -create -env}\
+ $env [lindex $omethods 0]\
+ [lindex $argses 0] $snamebase.0.db ]
+ $pdb associate [callback_n 0] $sdb
+
+ set ret [catch {$sdb truncate} ret]
+ error_check_good trunc_secondary $ret 1
+
+ error_check_good primary_close [$pdb close] 0
+ error_check_good secondary_close [$sdb close] 0
+ }
+ }
+
+ # If this test made the last env, close it.
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+ }
+}
diff --git a/db-4.8.30/test/sijointest.tcl b/db-4.8.30/test/sijointest.tcl
new file mode 100644
index 0000000..a1d32c9
--- /dev/null
+++ b/db-4.8.30/test/sijointest.tcl
@@ -0,0 +1,179 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST sijointest: Secondary index and join test.
+# TEST This used to be si005.tcl.
+proc sijointest { methods {nentries 1000} args } {
+ source ./include.tcl
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+	# Sijointest (formerly si005) does a join within a simulated database schema
+ # in which the primary index maps a record ID to a ZIP code and
+ # name in the form "XXXXXname", and there are two secondaries:
+ # one mapping ZIP to ID, the other mapping name to ID.
+ # The primary may be of any database type; the two secondaries
+ # must be either btree or hash.
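+	# For example (illustration only, values hypothetical): a primary
+	# record such as
+	#     key 7  ->  data "02000dog"
+	# yields ZIP secondary key "02000" (via sj_getzip) and name secondary
+	# key "dog" (via sj_getname), both of which map back to primary key 7.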
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method for the two secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < 2 } { incr i } {
+ lappend methods $pmethod
+ }
+ } elseif { [llength $methods] != 2 } {
+ puts "FAIL: Sijoin requires exactly two secondaries."
+ return
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Secondary index join test."
+ puts "sijoin \{\[ list $pmethod $methods \]\} $nentries"
+ env_cleanup $testdir
+
+ set pname "sijoin-primary.db"
+ set zipname "sijoin-zip.db"
+ set namename "sijoin-name.db"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the databases.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ set zipdb [eval {berkdb_open -create -dup -env} $env \
+ [lindex $omethods 0] [lindex $argses 0] $zipname]
+ error_check_good zip_open [is_valid_db $zipdb] TRUE
+ error_check_good zip_associate [$pdb associate sj_getzip $zipdb] 0
+
+ set namedb [eval {berkdb_open -create -dup -env} $env \
+ [lindex $omethods 1] [lindex $argses 1] $namename]
+ error_check_good name_open [is_valid_db $namedb] TRUE
+ error_check_good name_associate [$pdb associate sj_getname $namedb] 0
+
+ puts "\tSijoin.a: Populate database with $nentries \"names\""
+ sj_populate $pdb $nentries
+ puts "\tSijoin.b: Perform a join on each \"name\" and \"ZIP\""
+ sj_jointest $pdb $zipdb $namedb
+
+ error_check_good name_close [$namedb close] 0
+ error_check_good zip_close [$zipdb close] 0
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
+
+proc sj_jointest { pdb zipdb namedb } {
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set item [lindex [lindex $dbt 0] 1]
+ set retlist [sj_dojoin $item $pdb $zipdb $namedb]
+ }
+}
+
+proc sj_dojoin { item pdb zipdb namedb } {
+ set name [sj_getname "" $item]
+ set zip [sj_getzip "" $item]
+
+ set zipc [$zipdb cursor]
+ error_check_good zipc($item) [is_valid_cursor $zipc $zipdb] TRUE
+
+ set namec [$namedb cursor]
+ error_check_good namec($item) [is_valid_cursor $namec $namedb] TRUE
+
+ set pc [$pdb cursor]
+ error_check_good pc($item) [is_valid_cursor $pc $pdb] TRUE
+
+ set ret [$zipc get -set $zip]
+ set zd [lindex [lindex $ret 0] 1]
+ error_check_good zipset($zip) [sj_getzip "" $zd] $zip
+
+ set ret [$namec get -set $name]
+ set nd [lindex [lindex $ret 0] 1]
+ error_check_good nameset($name) [sj_getname "" $nd] $name
+
+ set joinc [$pdb join $zipc $namec]
+
+ set anyreturned 0
+ for { set dbt [$joinc get] } { [llength $dbt] > 0 } \
+ { set dbt [$joinc get] } {
+ set ritem [lindex [lindex $dbt 0] 1]
+ error_check_good returned_item($item) $ritem $item
+ incr anyreturned
+ }
+ error_check_bad anyreturned($item) $anyreturned 0
+
+ error_check_good joinc_close($item) [$joinc close] 0
+ error_check_good pc_close($item) [$pc close] 0
+ error_check_good namec_close($item) [$namec close] 0
+ error_check_good zipc_close($item) [$zipc close] 0
+}
+
+proc sj_populate { db nentries } {
+ global dict
+
+ set did [open $dict]
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ gets $did word
+ if { [string length $word] < 3 } {
+ gets $did word
+ if { [string length $word] < 3 } {
+ puts "FAIL:\
+ unexpected pair of words < 3 chars long"
+ }
+ }
+ set datalist [sj_name2zips $word]
+ foreach data $datalist {
+ error_check_good db_put($data) [$db put $i $data$word] 0
+ }
+ }
+ close $did
+}
+
+proc sj_getzip { key data } { return [string range $data 0 4] }
+proc sj_getname { key data } { return [string range $data 5 end] }
+
+# The dirty secret of this test is that the ZIP code is a function of the
+# name, so we can generate a database and then verify join results easily
+# without having to consult actual data.
+#
+# Any word passed into this function will generate from 1 to 26 ZIP
+# entries, out of the set {00000, 01000 ... 99000}. The number of entries
+# is just the position in the alphabet of the word's first letter; the
+# entries are then hashed to the set {00, 01 ... 99} N different ways.
+proc sj_name2zips { name } {
+ global alphabet
+
+	set n [string first [string index $name 0] $alphabet]
+	error_check_bad starts_with_abc($name) $n -1
+	incr n
+
+ set ret {}
+ for { set i 0 } { $i < $n } { incr i } {
+ set b 0
+ for { set j 1 } { $j < [string length $name] } \
+ { incr j } {
+ set b [sj_nhash $name $i $j $b]
+ }
+ lappend ret [format %05u [expr $b % 100]000]
+ }
+ return $ret
+}
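+
+# Illustration only: a name beginning with "c" (the 3rd letter) produces 3
+# entries, one per hash variant i = 0, 1, 2.  Each entry is a 5-digit string
+# of the form NN000 (NN in 00-99), which is exactly the prefix sj_getzip
+# later recovers from the front of the stored data.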
+proc sj_nhash { name i j b } {
+ global alphabet
+
+	set c [string first [string index $name $j] $alphabet]
+ return [expr (($b * 991) + ($i * 997) + $c) % 10000000]
+}
diff --git a/db-4.8.30/test/siutils.tcl b/db-4.8.30/test/siutils.tcl
new file mode 100644
index 0000000..71b77d2
--- /dev/null
+++ b/db-4.8.30/test/siutils.tcl
@@ -0,0 +1,292 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Secondary index utilities. This file used to be known as
+# sindex.tcl.
+#
+# The secondary index tests themselves live in si0*.tcl.
+#
+# Standard number of secondary indices to create if a single-element
+# list of methods is passed into the secondary index tests.
+global nsecondaries
+set nsecondaries 2
+
+# The callback function we use for each given secondary in most tests
+# is a simple function of its place in the list of secondaries (0-based)
+# and the access method (since recnos may need different callbacks).
+#
+# !!!
+# Note that callbacks 0-3 return unique secondary keys if the input data
+# are unique; callbacks 4 and higher may not, so don't use them with
+# the normal wordlist and secondaries that don't support dups.
+# The callbacks that incorporate a key don't work properly with recno
+# access methods, at least not in the current test framework (the
+# error_check_good lines test for e.g. 1foo, when the database has
+# e.g. 0x010x000x000x00foo).
+proc callback_n { n } {
+ switch $n {
+ 0 { return _s_reversedata }
+ 1 { return _s_noop }
+ 2 { return _s_concatkeydata }
+ 3 { return _s_concatdatakey }
+ 4 { return _s_reverseconcat }
+ 5 { return _s_truncdata }
+ 6 { return _s_constant }
+ 7 { return _s_twokeys }
+ 8 { return _s_variablekeys }
+ }
+ return _s_noop
+}
+
+proc _s_noop { a b } { return $b }
+proc _s_reversedata { a b } { return [reverse $b] }
+proc _s_truncdata { a b } { return [string range $b 1 end] }
+proc _s_concatkeydata { a b } { return $a$b }
+proc _s_concatdatakey { a b } { return $b$a }
+proc _s_reverseconcat { a b } { return [reverse $a$b] }
+proc _s_constant { a b } { return "constant-data" }
+proc _s_twokeys { a b } { return [list 1 2] }
+proc _s_variablekeys { a b } {
+ set rlen [string length $b]
+ set result {}
+ for {set i 0} {$i < $rlen} {incr i} {
+		lappend result $i
+ }
+ return $result
+}
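+
+# Illustration only (these calls are not made by the tests themselves): for a
+# hypothetical primary pair with key "k1" and data "apple", the callbacks
+# above would produce secondary keys roughly as follows:
+#     [_s_reversedata   k1 apple]  -> elppa
+#     [_s_concatkeydata k1 apple]  -> k1apple
+#     [_s_concatdatakey k1 apple]  -> applek1
+#     [_s_twokeys       k1 apple]  -> {1 2}    (a multi-key callback)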
+
+# Should the check_secondary routines print lots of output?
+set verbose_check_secondaries 0
+
+# Given a primary database handle, a list of secondary handles, a
+# number of entries, and arrays of keys and data, verify that all
+# databases have what they ought to.
+proc check_secondaries { pdb sdbs nentries keyarr dataarr {pref "Check"} \
+ {errp NONE} {errs NONE} {errsg NONE}} {
+ upvar $keyarr keys
+ upvar $dataarr data
+ global verbose_check_secondaries
+
+ if { [string compare $errp NONE] != 0 } {
+ upvar $errp errorp
+ }
+ set errorp 0
+ if { [string compare $errs NONE] != 0 } {
+ upvar $errs errors
+ }
+ set errors 0
+ if { [string compare $errsg NONE] != 0 } {
+ upvar $errsg errorsg
+ }
+ set errorsg 0
+ # Make sure each key/data pair is in the primary.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.1: Each key/data pair is in the primary"
+ }
+ for { set i 0 } { $i < $nentries } { incr i } {
+ if { [string equal $errp NONE] } {
+ error_check_good pdb_get($i) [$pdb get $keys($i)] \
+ [list [list $keys($i) $data($i)]]
+ } else {
+ set stat [catch {$pdb get $keys($i)} ret]
+ if { $stat == 1 } {
+ set errorp $ret
+ break
+ } else {
+ error_check_good pdb_get($i) $ret \
+ [list [list $keys($i) $data($i)]]
+ }
+ }
+ }
+
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ # Make sure each key/data pair is in this secondary.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.2:\
+ Each skey/key/data tuple is in secondary #$j"
+ }
+ set sdb [lindex $sdbs $j]
+ set nskeys 0
+ for { set i 0 } { $i < $nentries } { incr i } {
+ set skeys [[callback_n $j] $keys($i) $data($i)]
+ if { [llength $skeys] == 0 } {
+ set skeys [list $skeys]
+ }
+ foreach skey $skeys {
+ incr nskeys
+ # Check with pget on the secondary.
+ set stat [catch {$sdb pget -get_both \
+ $skey $keys($i)} ret]
+ if { [string equal $errs NONE] } {
+ error_check_good stat $stat 0
+ error_check_good sdb($j)_pget($i) $ret \
+ [list [list \
+ $skey $keys($i) $data($i)]]
+ } else {
+ if { $stat == 1 } {
+ set errors $ret
+ } else {
+ error_check_good \
+ sdb($j)_pget($i) $ret \
+ [list [list \
+ $skey $keys($i) $data($i)]]
+ }
+ }
+ # Check again with get on the secondary. Since
+ # get_both is not an allowed option with get on
+ # a secondary handle, we can't guarantee an
+ # exact match on method 5 and over. We just
+ # make sure that one of the returned key/data
+ # pairs is the right one.
+ if { $j >= 5 } {
+ error_check_good sdb($j)_get($i) \
+ [is_substr [$sdb get $skey] \
+ [list [list $skey $data($i)]]] 1
+ } else {
+ set stat [catch {$sdb get $skey} ret]
+ if { [string equal $errs NONE] } {
+ error_check_good \
+ sdb($j)_get($i) $ret \
+ [list [list \
+ $skey $data($i)]]
+ } else {
+ if { $stat == 1 } {
+ set errorsg $ret
+ break
+ } else {
+ error_check_good \
+ sdb($j)_get($i) \
+ $ret [list [list \
+ $skey $data($i)]]
+ }
+ }
+ }
+ #
+ # We couldn't break above because we need to
+ # execute the errorsg error as well.
+ #
+ if { $errors != 0 } {
+ break
+ }
+ }
+ }
+ if { $errors != 0 || $errorsg != 0 } {
+ break
+ }
+
+ # Make sure this secondary contains only $nskeys
+ # items.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.3: Secondary #$j has $nskeys items"
+ }
+ set dbc [$sdb cursor]
+ error_check_good dbc($i) \
+ [is_valid_cursor $dbc $sdb] TRUE
+ for { set k 0 } { [llength [$dbc get -next]] > 0 } \
+ { incr k } { }
+ error_check_good numitems($i) $k $nskeys
+ error_check_good dbc($i)_close [$dbc close] 0
+ }
+ if { $errorp != 0 || $errors != 0 || $errorsg != 0 } {
+ return
+ }
+
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.4: Primary has $nentries items"
+ }
+ set dbc [$pdb cursor]
+ error_check_good pdbc [is_valid_cursor $dbc $pdb] TRUE
+ for { set k 0 } { [llength [$dbc get -next]] > 0 } { incr k } { }
+ error_check_good numitems $k $nentries
+ error_check_good pdbc_close [$dbc close] 0
+}
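+
+# Illustration only: callers pass the key and data arrays by name so that
+# this proc can upvar into them, e.g. (as the si tests do)
+#     check_secondaries $pdb $sdbs $nentries keys data "Si001.a"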
+
+# Given a primary database handle and a list of secondary handles, walk
+# through the primary and make sure all the secondaries are correct,
+# then walk through the secondaries and make sure the primary is correct.
+#
+# This is slightly less rigorous than the normal check_secondaries--we
+# use it whenever we don't have up-to-date "keys" and "data" arrays.
+proc cursor_check_secondaries { pdb sdbs nentries { pref "Check" } } {
+ global verbose_check_secondaries
+
+ # Make sure each key/data pair in the primary is in each secondary.
+ set pdbc [$pdb cursor]
+ error_check_good ccs_pdbc [is_valid_cursor $pdbc $pdb] TRUE
+ set i 0
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.1:\
+ Key/data in primary => key/data in secondaries"
+ }
+
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ incr i
+ set pkey [lindex [lindex $dbt 0] 0]
+ set pdata [lindex [lindex $dbt 0] 1]
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ set sdb [lindex $sdbs $j]
+ # Check with pget.
+ foreach skey [[callback_n $j] $pkey $pdata] {
+ set sdbt [$sdb pget -get_both $skey $pkey]
+ error_check_good pkey($pkey,$j) \
+ [lindex [lindex $sdbt 0] 1] $pkey
+ error_check_good pdata($pdata,$j) \
+ [lindex [lindex $sdbt 0] 2] $pdata
+ }
+ }
+ }
+ error_check_good ccs_pdbc_close [$pdbc close] 0
+ error_check_good primary_has_nentries $i $nentries
+
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.2:\
+ Key/data in secondary #$j => key/data in primary"
+ }
+ set sdb [lindex $sdbs $j]
+ set sdbc [$sdb cursor]
+ error_check_good ccs_sdbc($j) [is_valid_cursor $sdbc $sdb] TRUE
+ for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next] } {
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdata [lindex [lindex $dbt 0] 2]
+ error_check_good pdb_get($pkey/$pdata,$j) \
+ [$pdb get -get_both $pkey $pdata] \
+ [list [list $pkey $pdata]]
+ }
+
+ # To exercise pget -last/pget -prev, we do it backwards too.
+ for { set dbt [$sdbc pget -last] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -prev] } {
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdata [lindex [lindex $dbt 0] 2]
+ error_check_good pdb_get_bkwds($pkey/$pdata,$j) \
+ [$pdb get -get_both $pkey $pdata] \
+ [list [list $pkey $pdata]]
+ }
+
+ error_check_good ccs_sdbc_close($j) [$sdbc close] 0
+ }
+}
+
+# The secondary index tests take a list of the access methods that
+# each database ought to use. These helpers convert that list in one pass
+# into a list of converted args and a list of converted methods, one entry
+# per method.
+proc convert_argses { methods largs } {
+ set ret {}
+ foreach m $methods {
+ lappend ret [convert_args $m $largs]
+ }
+ return $ret
+}
+proc convert_methods { methods } {
+ set ret {}
+ foreach m $methods {
+ lappend ret [convert_method $m]
+ }
+ return $ret
+}
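+
+# Illustration only: the two helpers are typically used together, as in
+# sijointest above, e.g.
+#     set argses   [convert_argses $methods $args]
+#     set omethods [convert_methods $methods]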
diff --git a/db-4.8.30/test/sysscript.tcl b/db-4.8.30/test/sysscript.tcl
new file mode 100644
index 0000000..1ef1fcc
--- /dev/null
+++ b/db-4.8.30/test/sysscript.tcl
@@ -0,0 +1,282 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# System integration test script.
+# This script runs a single process that tests the full functionality of
+# the system. The database under test contains nfiles files. Each process
+# randomly generates a key and some data. Both keys and data are bimodally
+# distributed between small values (1-10 characters) and large values (the
+# average length is given by the corresponding command-line parameter).
+# The process then decides on a replication factor between 1 and nfiles.
+# It writes the key and data to that many files and tacks on the file ids
+# of the files it writes to the data string. For example, let's say that
+# I randomly generate the key dog and data cat. Then I pick a replication
+# factor of 3. I pick 3 files from the set of n (say 1, 3, and 5). I then
+# rewrite the data as 1:3:5:cat. I begin a transaction, add the key/data
+# pair to each file and then commit. Notice that I may generate replication
+# of the form 1:3:3:cat in which case I simply add a duplicate to file 3.
+#
+# Usage: sysscript dir nfiles key_avg data_avg method args
+#
+# dir: DB_HOME directory
+# nfiles: number of files in the set
+# key_avg: average big key size
+# data_avg: average big data size
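+#
+# A minimal sketch (illustration only, not executed) of the tagging scheme
+# described above, assuming data "cat" and picked files 1, 3 and 5:
+#
+#     set data cat
+#     foreach f {1 3 5} { set data $f:$data }
+#     # data is now "5:3:1:cat"; the verification phase peels the ids back
+#     # off the front with [string first :] until only "cat" remains.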
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set mypid [pid]
+
+set usage "sysscript dir nfiles key_avg data_avg method args"
+
+# Verify usage
+if { $argc < 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+puts [concat "Argc: " $argc " Argv: " $argv]
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set nfiles [ lindex $argv 1 ]
+set key_avg [ lindex $argv 2 ]
+set data_avg [ lindex $argv 3 ]
+set method [ lindex $argv 4 ]
+set args [ lindex $argv 5 ]
+
+# Initialize seed
+global rand_init
+berkdb srand $rand_init
+
+puts "Beginning execution for $mypid"
+puts "$dir DB_HOME"
+puts "$nfiles files"
+puts "$key_avg average key length"
+puts "$data_avg average data length"
+
+flush stdout
+
+# Create local environment
+set dbenv [berkdb_env -txn -home $dir]
+set err [catch {error_check_good $mypid:dbenv [is_substr $dbenv env] 1} ret]
+if {$err != 0} {
+ puts $ret
+ return
+}
+
+# Now open the files
+for { set i 0 } { $i < $nfiles } { incr i } {
+ set file test044.$i.db
+ set db_set($i) [eval {berkdb open} -auto_commit -env $dbenv $args $method $file ]
+ set err [catch {error_check_bad $mypid:dbopen $db_set($i) NULL} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set err [catch {error_check_bad $mypid:dbopen [is_substr $db_set($i) \
+ error] 1} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+}
+
+set record_based [is_record_based $method]
+while { 1 } {
+ # Decide if we're going to create a big key or a small key
+ # We give small keys a 70% chance.
+ if { [berkdb random_int 1 10] < 8 } {
+ set k [random_data 5 0 0 $record_based]
+ } else {
+ set k [random_data $key_avg 0 0 $record_based]
+ }
+ set data [chop_data $method [random_data $data_avg 0 0]]
+
+ set txn [$dbenv txn]
+ set err [catch {error_check_good $mypid:txn_begin [is_substr $txn \
+ $dbenv.txn] 1} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+
+ # Open cursors
+ for { set f 0 } {$f < $nfiles} {incr f} {
+ set cursors($f) [$db_set($f) cursor -txn $txn]
+ set err [catch {error_check_good $mypid:cursor_open \
+ [is_substr $cursors($f) $db_set($f)] 1} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set aborted 0
+
+ # Check to see if key is already in database
+ set found 0
+ for { set i 0 } { $i < $nfiles } { incr i } {
+		set r [$db_set($i) get -txn $txn $k]
+ if { $r == "-1" } {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good \
+ $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good $mypid:txn_abort \
+ [$txn abort] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set aborted 1
+ set found 2
+ break
+ } elseif { $r != "Key $k not found." } {
+ set found 1
+ break
+ }
+ }
+ switch $found {
+ 2 {
+ # Transaction aborted, no need to do anything.
+ }
+ 0 {
+ # Key was not found, decide how much to replicate
+ # and then create a list of that many file IDs.
+ set repl [berkdb random_int 1 $nfiles]
+ set fset ""
+ for { set i 0 } { $i < $repl } {incr i} {
+ set f [berkdb random_int 0 [expr $nfiles - 1]]
+ lappend fset $f
+ set data [chop_data $method $f:$data]
+ }
+
+ foreach i $fset {
+ set r [$db_set($i) put -txn $txn $k $data]
+ if {$r == "-1"} {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good \
+ $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good \
+ $mypid:txn_abort [$txn abort] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set aborted 1
+ break
+ }
+ }
+ }
+ 1 {
+ # Key was found. Make sure that all the data values
+ # look good.
+ set f [zero_list $nfiles]
+ set data $r
+ while { [set ndx [string first : $r]] != -1 } {
+ set fnum [string range $r 0 [expr $ndx - 1]]
+ if { [lindex $f $fnum] == 0 } {
+ #set flag -set
+ set full [record $cursors($fnum) get -set $k]
+ } else {
+ #set flag -next
+ set full [record $cursors($fnum) get -next]
+ }
+ if {[llength $full] == 0} {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good \
+ $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good \
+ $mypid:txn_abort [$txn abort] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set aborted 1
+ break
+ }
+ set err [catch {error_check_bad \
+ $mypid:curs_get($k,$data,$fnum,$flag) \
+ [string length $full] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set key [lindex [lindex $full 0] 0]
+ set rec [pad_data $method [lindex [lindex $full 0] 1]]
+ set err [catch {error_check_good \
+ $mypid:dbget_$fnum:key $key $k} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set err [catch {error_check_good \
+ $mypid:dbget_$fnum:data($k) $rec $data} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set f [lreplace $f $fnum $fnum 1]
+ incr ndx
+ set r [string range $r $ndx end]
+ }
+ }
+ }
+ if { $aborted == 0 } {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good $mypid:commit [$txn commit] \
+ 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+}
+
+# Close files
+for { set i 0 } { $i < $nfiles} { incr i } {
+ set r [$db_set($i) close]
+ set err [catch {error_check_good $mypid:db_close:$i $r 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+}
+
+# Close tm and environment
+$dbenv close
+
+puts "[timestamp] [pid] Complete"
+flush stdout
+
+filecheck $file 0
diff --git a/db-4.8.30/test/t106script.tcl b/db-4.8.30/test/t106script.tcl
new file mode 100644
index 0000000..52ae804
--- /dev/null
+++ b/db-4.8.30/test/t106script.tcl
@@ -0,0 +1,331 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+
+proc t106_initial { nitems nprod id tnum dbenv order args } {
+ source ./include.tcl
+
+ set pid [pid]
+ puts "\tTest$tnum: Producer $pid initializing DBs"
+
+ # Each producer initially loads a small number of items to
+ # each btree database, then enters a RMW loop where it randomly
+	# selects and executes one of two cursor operations:
+ # 1. Read-modify-write an item in db2; or
+ # 2. Read-modify-write an item in both db2 and db3, randomly
+ # selecting between db2 and db3 on which to open first, which to
+	# read first, which to write first, and which to close first. This
+	# may create deadlocks, so we keep retrying until the operation
+	# succeeds.
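+	# (The RMW loop itself runs in t106_produce; operations 1 and 2
+	# correspond to the procs op1 and op2 defined further below.)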
+
+ # Open queue database
+ set dbq [eval {berkdb_open -create -queue -env $dbenv\
+ -auto_commit -len 32 queue.db} ]
+ error_check_good dbq_open [is_valid_db $dbq] TRUE
+
+ # Open four btree databases
+ set db1 [berkdb_open \
+ -create -btree -env $dbenv -auto_commit testfile1.db]
+ error_check_good db1_open [is_valid_db $db1] TRUE
+ set db2 [berkdb_open \
+ -create -btree -env $dbenv -auto_commit testfile2.db]
+ error_check_good db2_open [is_valid_db $db2] TRUE
+ set db3 [berkdb_open \
+ -create -btree -env $dbenv -auto_commit testfile3.db]
+ error_check_good db3_open [is_valid_db $db3] TRUE
+ set db4 [berkdb_open \
+ -create -btree -env $dbenv -auto_commit testfile4.db]
+ error_check_good db4_open [is_valid_db $db4] TRUE
+
+ # Initialize databases with $nitems items from each producer.
+ set did [open $dict]
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ set db2data [read $did [berkdb random_int 300 700]]
+ set db3data [read $did [berkdb random_int 500 1000]]
+ set qdata [read $did 32]
+ set suffix _0_$i
+ set db23key "testclient$id$suffix"
+ set suffix _$i
+ set db4key key$id$suffix
+
+ set t [$dbenv txn]
+ set txn "-txn $t"
+ error_check_good db2_put [eval {$db2 put} $txn\
+ {$db23key $db2data}] 0
+ error_check_good db3_put [eval {$db3 put} $txn\
+ {$db23key $db3data}] 0
+ error_check_good db4_put [eval {$db4 put} $txn\
+ {$db4key $db23key}] 0
+
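+		# Use a child transaction for the queue append and the matching
+		# db1 record so the pair commits (or aborts) together, nested
+		# under the parent transaction that wrote db2, db3 and db4.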
+ set c [$dbenv txn -parent $t]
+ set ctxn "-txn $c"
+ set qrecno [eval {$dbq put -append} $ctxn {$qdata}]
+ error_check_good db1_put [eval {$db1 put} $ctxn\
+ {$qrecno $db2data}] 0
+ error_check_good commit_child [$c commit] 0
+ error_check_good commit_parent [$t commit] 0
+ }
+ close $did
+
+ set ret [catch {$dbq close} res]
+ error_check_good dbq_close:$pid $ret 0
+ set ret [catch {$db1 close} res]
+ error_check_good db1_close:$pid $ret 0
+ set ret [catch {$db2 close} res]
+ error_check_good db2_close:$pid $ret 0
+ set ret [catch {$db3 close} res]
+ error_check_good db3_close:$pid $ret 0
+ set ret [catch {$db4 close} res]
+ error_check_good db4_close:$pid $ret 0
+
+ puts "\t\tTest$tnum: Initializer $pid finished."
+}
+
+proc t106_produce { nitems nprod id tnum dbenv order niter args } {
+ source ./include.tcl
+
+ set pid [pid]
+ set did [open $dict]
+	puts "\tTest$tnum: Producer $pid starting produce phase"
+
+ # Open queue database
+ set dbq [eval {berkdb_open -create -queue -env $dbenv\
+ -auto_commit -len 32 queue.db} ]
+ error_check_good dbq_open [is_valid_db $dbq] TRUE
+
+ # Open four btree databases
+ set db1 [berkdb_open \
+ -create -btree -env $dbenv -auto_commit testfile1.db]
+ error_check_good db1_open [is_valid_db $db1] TRUE
+ set db2 [berkdb_open \
+ -create -btree -env $dbenv -auto_commit testfile2.db]
+ error_check_good db2_open [is_valid_db $db2] TRUE
+ set db3 [berkdb_open \
+ -create -btree -env $dbenv -auto_commit testfile3.db]
+ error_check_good db3_open [is_valid_db $db3] TRUE
+ set db4 [berkdb_open \
+ -create -btree -env $dbenv -auto_commit testfile4.db]
+ error_check_good db4_open [is_valid_db $db4] TRUE
+
+ # Now go into RMW phase.
+ for { set i 1 } { $i <= $niter } { incr i } {
+
+ set op [berkdb random_int 1 2]
+ set newdb2data [read $did [berkdb random_int 300 700]]
+ set qdata [read $did 32]
+
+ if { $order == "ordered" } {
+ set n [expr $i % $nitems]
+ if { $n == 0 } {
+ set n $nitems
+ }
+ set suffix _0_$n
+ } else {
+ # Retrieve a random key from the list
+ set suffix _0_[berkdb random_int 1 $nitems]
+ }
+ set key "testclient$id$suffix"
+
+ set t [$dbenv txn]
+ set txn "-txn $t"
+
+ # Now execute op1 or op2
+ if { $op == 1 } {
+ op1 $db2 $key $newdb2data $txn
+ } elseif { $op == 2 } {
+ set newdb3data [read $did [berkdb random_int 500 1000]]
+ op2 $db2 $db3 $key $newdb2data $newdb3data $txn $dbenv
+ } else {
+			puts "FAIL: unrecognized op $op"
+ }
+ set c [$dbenv txn -parent $t]
+ set ctxn "-txn $c"
+ set qrecno [eval {$dbq put -append} $ctxn {$qdata}]
+ error_check_good db1_put [eval {$db1 put} $ctxn\
+ {$qrecno $newdb2data}] 0
+ error_check_good child_commit [$c commit] 0
+ error_check_good parent_commit [$t commit] 0
+ }
+ close $did
+
+ set ret [catch {$dbq close} res]
+ error_check_good dbq_close:$pid $ret 0
+ set ret [catch {$db1 close} res]
+ error_check_good db1_close:$pid $ret 0
+ set ret [catch {$db2 close} res]
+ error_check_good db2_close:$pid $ret 0
+ set ret [catch {$db3 close} res]
+ error_check_good db3_close:$pid $ret 0
+ set ret [catch {$db4 close} res]
+ error_check_good db4_close:$pid $ret 0
+
+ puts "\t\tTest$tnum: Producer $pid finished."
+}
+
+proc t106_consume { nitems tnum outputfile mode dbenv niter args } {
+ source ./include.tcl
+ set pid [pid]
+ puts "\tTest$tnum: Consumer $pid starting ($niter iterations)."
+
+ # Open queue database and btree database 1.
+ set dbq [eval {berkdb_open \
+ -create -queue -env $dbenv -auto_commit -len 32 queue.db} ]
+ error_check_good dbq_open:$pid [is_valid_db $dbq] TRUE
+
+ set db1 [eval {berkdb_open \
+ -create -btree -env $dbenv -auto_commit testfile1.db} ]
+ error_check_good db1_open:$pid [is_valid_db $db1] TRUE
+
+ set oid [open $outputfile a]
+
+ for { set i 1 } { $i <= $nitems } {incr i } {
+ set t [$dbenv txn]
+ set txn "-txn $t"
+ set ret [eval {$dbq get $mode} $txn]
+ set qrecno [lindex [lindex $ret 0] 0]
+ set db1curs [eval {$db1 cursor} $txn]
+ if {[catch {eval $db1curs get -set -rmw $qrecno} res]} {
+ puts "FAIL: $db1curs get: $res"
+ }
+ error_check_good db1curs_del [$db1curs del] 0
+ error_check_good db1curs_close [$db1curs close] 0
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ error_check_good output_close:$pid [close $oid] ""
+
+ set ret [catch {$dbq close} res]
+ error_check_good dbq_close:$pid $ret 0
+ set ret [catch {$db1 close} res]
+ error_check_good db1_close:$pid $ret 0
+ puts "\t\tTest$tnum: Consumer $pid finished."
+}
+
+# op1 overwrites one data item in db2.
+proc op1 { db2 key newdata txn } {
+
+ set db2c [eval {$db2 cursor} $txn]
+puts "in op1, key is $key"
+ set ret [eval {$db2c get -set -rmw $key}]
+ # Make sure we retrieved something
+ error_check_good db2c_get [llength $ret] 1
+ error_check_good db2c_put [eval {$db2c put} -current {$newdata}] 0
+ error_check_good db2c_close [$db2c close] 0
+}
+
+# op2 overwrites one data item in each of db2 and db3, randomizing the order
+# in which the two cursors are opened, read, written and closed.
+proc op2 { db2 db3 key newdata2 newdata3 txn dbenv } {
+
+ # Randomly choose whether to work on db2 or db3 first for
+ # each operation: open cursor, get, put, close.
+ set open1 [berkdb random_int 0 1]
+ set get1 [berkdb random_int 0 1]
+ set put1 [berkdb random_int 0 1]
+ set close1 [berkdb random_int 0 1]
+puts "open [expr $open1 + 2] first, get [expr $get1 + 2] first,\
+ put [expr $put1 + 2] first, close [expr $close1 + 2] first"
+puts "in op2, key is $key"
+
+ # Open cursor
+ if { $open1 == 0 } {
+ set db2c [eval {$db2 cursor} $txn]
+ set db3c [eval {$db3 cursor} $txn]
+ } else {
+ set db3c [eval {$db3 cursor} $txn]
+ set db2c [eval {$db2 cursor} $txn]
+ }
+ error_check_good db2_cursor [is_valid_cursor $db2c $db2] TRUE
+ error_check_good db3_cursor [is_valid_cursor $db3c $db3] TRUE
+
+ # Do the following until we succeed and don't get DB_DEADLOCK:
+ if { $get1 == 0 } {
+ get_set_rmw $db2c $key $dbenv
+ get_set_rmw $db3c $key $dbenv
+ } else {
+ get_set_rmw $db3c $key $dbenv
+ get_set_rmw $db2c $key $dbenv
+ }
+
+ # Put new data.
+ if { $put1 == 0 } {
+ error_check_good db2c_put [eval {$db2c put} \
+ -current {$newdata2}] 0
+ error_check_good db3c_put [eval {$db3c put} \
+ -current {$newdata3}] 0
+ } else {
+ error_check_good db3c_put [eval {$db3c put} \
+ -current {$newdata3}] 0
+ error_check_good db2c_put [eval {$db2c put} \
+ -current {$newdata2}] 0
+ }
+ if { $close1 == 0 } {
+ error_check_good db2c_close [$db2c close] 0
+ error_check_good db3c_close [$db3c close] 0
+ } else {
+ error_check_good db3c_close [$db3c close] 0
+ error_check_good db2c_close [$db2c close] 0
+ }
+}
+
+proc get_set_rmw { dbcursor key dbenv } {
+
+ while { 1 } {
+ if {[catch {set ret [eval {$dbcursor get -set -rmw} $key]}\
+ res ]} {
+ # If the get failed, break if it failed for any
+ # reason other than deadlock. If we have deadlock,
+ # the deadlock detector should break the deadlock
+ # as we keep trying.
+ if { [is_substr $res DB_LOCK_DEADLOCK] != 1 } {
+ puts "FAIL: get_set_rmw: $res"
+ break
+ }
+ } else {
+ # We succeeded. Go back to the body of the test.
+ break
+ }
+ }
+}
+
+source ./include.tcl
+source $test_path/test.tcl
+
+# Verify usage
+set usage "t106script.tcl dir runtype nitems nprod outputfile id tnum order niter args"
+if { $argc < 10 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set runtype [lindex $argv 1]
+set nitems [lindex $argv 2]
+set nprod [lindex $argv 3]
+set outputfile [lindex $argv 4]
+set id [lindex $argv 5]
+set tnum [lindex $argv 6]
+set order [lindex $argv 7]
+set niter [lindex $argv 8]
+# args is the string "{ -len 20 -pad 0}", so we need to extract the
+# " -len 20 -pad 0" part.
+set args [lindex [lrange $argv 9 end] 0]
+
+# Open env
+set dbenv [berkdb_env -home $dir -txn]
+error_check_good dbenv_open [is_valid_env $dbenv] TRUE
+
+# Invoke initial, produce or consume based on $runtype
+if { $runtype == "INITIAL" } {
+ t106_initial $nitems $nprod $id $tnum $dbenv $order $args
+} elseif { $runtype == "PRODUCE" } {
+ t106_produce $nitems $nprod $id $tnum $dbenv $order $niter $args
+} elseif { $runtype == "WAIT" } {
+ t106_consume $nitems $tnum $outputfile -consume_wait $dbenv $args
+} else {
+	error_check_good bad_args $runtype "either INITIAL, PRODUCE, or WAIT"
+}
+error_check_good env_close [$dbenv close] 0
+exit
diff --git a/db-4.8.30/test/test.tcl b/db-4.8.30/test/test.tcl
new file mode 100644
index 0000000..8ab9313
--- /dev/null
+++ b/db-4.8.30/test/test.tcl
@@ -0,0 +1,2633 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+
+source ./include.tcl
+
+# Add the default Windows build sub-directory to the path, so that
+# the binaries can be found without copies.
+if {[string match Win* $tcl_platform(os)]} {
+ global env
+ global buildpath
+ set env(PATH) "$env(PATH)\;$buildpath"
+}
+
+# Load DB's TCL API.
+load $tcllib
+
+if { [file exists $testdir] != 1 } {
+ file mkdir $testdir
+}
+
+global __debug_print
+global __debug_on
+global __debug_test
+
+#
+# Test if utilities work to figure out the path. Most systems
+# use ., but QNX has a problem with execvp of shell scripts which
+# causes it to break.
+#
+set stat [catch {exec ./db_printlog -?} ret]
+if { [string first "exec format error" $ret] != -1 } {
+ set util_path ./.libs
+} else {
+ set util_path .
+}
+set __debug_print 0
+set encrypt 0
+set old_encrypt 0
+set passwd test_passwd
+
+# Error stream that (should!) always go to the console, even if we're
+# redirecting to ALL.OUT.
+set consoleerr stderr
+
+set dict $test_path/wordlist
+set alphabet "abcdefghijklmnopqrstuvwxyz"
+set datastr "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
+
+# Random number seed.
+global rand_init
+set rand_init 11302005
+
+# Default record length for fixed record length access method(s)
+set fixed_len 20
+
+set recd_debug 0
+set log_log_record_types 0
+set ohandles {}
+
+# Normally, we're not running an all-tests-in-one-env run. This matters
+# for error stream/error prefix settings in berkdb_open.
+global is_envmethod
+set is_envmethod 0
+
+#
+# Set when we're running a child process in a rep test.
+#
+global is_repchild
+set is_repchild 0
+
+# Set when we want to use replication test messaging that cannot
+# share an env -- for example, because the replication processes
+# are not all from the same BDB version.
+global noenv_messaging
+set noenv_messaging 0
+
+# For testing locker id wrap around.
+global lock_curid
+global lock_maxid
+set lock_curid 0
+set lock_maxid 2147483647
+global txn_curid
+global txn_maxid
+set txn_curid 2147483648
+set txn_maxid 4294967295
+
+# The variable one_test allows us to run all the permutations
+# of a test with run_all or run_std.
+global one_test
+if { [info exists one_test] != 1 } {
+ set one_test "ALL"
+}
+
+# If you call a test with the proc find_valid_methods, it will
+# return the list of methods for which it will run, instead of
+# actually running.
+global checking_valid_methods
+set checking_valid_methods 0
+global valid_methods
+set valid_methods { btree rbtree queue queueext recno frecno rrecno hash }
+
+# The variable test_recopts controls whether we open envs in
+# replication tests with the -recover flag. The default is
+# to test with and without the flag, but to run a meaningful
+# subset of rep tests more quickly, rep_subset will randomly
+# pick one or the other.
+global test_recopts
+set test_recopts { "-recover" "" }
+
+# Set up any OS-specific values.
+source $test_path/testutils.tcl
+
+global tcl_platform
+set is_freebsd_test [string match FreeBSD $tcl_platform(os)]
+set is_hp_test [string match HP-UX $tcl_platform(os)]
+set is_linux_test [string match Linux $tcl_platform(os)]
+set is_qnx_test [string match QNX $tcl_platform(os)]
+set is_sunos_test [string match SunOS $tcl_platform(os)]
+set is_windows_test [string match Win* $tcl_platform(os)]
+set is_windows9x_test [string match "Windows 95" $tcl_platform(osVersion)]
+set is_je_test 0
+set upgrade_be [big_endian]
+global is_fat32
+set is_fat32 [string match FAT32 [lindex [file system check] 1]]
+global EXE BAT
+if { $is_windows_test == 1 } {
+ set EXE ".exe"
+ set BAT ".bat"
+} else {
+ set EXE ""
+ set BAT ""
+}
+
+if { $is_windows_test == 1 } {
+ set util_path "./$buildpath"
+}
+
+# This is where the test numbering and parameters now live.
+source $test_path/testparams.tcl
+source $test_path/db_reptest.tcl
+
+# Try to open an encrypted database. If it fails, this release
+# doesn't support encryption, and encryption tests should be skipped.
+set has_crypto 1
+set stat [catch {set db [eval {berkdb_open_noerr \
+ -create -btree -encryptaes test_passwd} ] } result ]
+if { $stat != 0 } {
+ # Make sure it's the right error for a non-crypto release.
+ error_check_good non_crypto_release \
+ [expr [is_substr $result "operation not supported"] || \
+ [is_substr $result "invalid argument"]] 1
+ set has_crypto 0
+} else {
+ # It is a crypto release. Get rid of the db, we don't need it.
+ error_check_good close_encrypted_db [$db close] 0
+}
+
+# Get the default page size of this system
+global default_pagesize
+set db [berkdb_open_noerr -create -btree]
+error_check_good "db open" [is_valid_db $db] TRUE
+set stat [catch {set default_pagesize [$db get_pagesize]} result]
+error_check_good "db get_pagesize" $stat 0
+error_check_good "db close" [$db close] 0
+
+# From here on out, test.tcl contains the procs that are used to
+# run all or part of the test suite.
+
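+# Illustration only -- typical invocations from a tclsh prompt (exact test
+# names come from testparams.tcl):
+#     % run_std                  ;# the standard regression run
+#     % run_std test001          ;# every permutation of a single test
+#     % r lock                   ;# just the locking subsystem tests
+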
+proc run_std { { testname ALL } args } {
+ global test_names
+ global one_test
+ global has_crypto
+ global valid_methods
+ source ./include.tcl
+
+ set one_test $testname
+ if { $one_test != "ALL" } {
+ # Source testparams again to adjust test_names.
+ source $test_path/testparams.tcl
+ }
+
+ set exflgs [eval extractflags $args]
+ set args [lindex $exflgs 0]
+ set flags [lindex $exflgs 1]
+
+ set display 1
+ set run 1
+ set am_only 0
+ set no_am 0
+ set std_only 1
+ set rflags {--}
+ foreach f $flags {
+ switch $f {
+ A {
+ set std_only 0
+ }
+ M {
+ set no_am 1
+ puts "run_std: all but access method tests."
+ }
+ m {
+ set am_only 1
+ puts "run_std: access method tests only."
+ }
+ n {
+ set display 1
+ set run 0
+ set rflags [linsert $rflags 0 "-n"]
+ }
+ }
+ }
+
+ if { $std_only == 1 } {
+ fileremove -f ALL.OUT
+
+ set o [open ALL.OUT a]
+ if { $run == 1 } {
+ puts -nonewline "Test suite run started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts [berkdb version -string]
+
+ puts -nonewline $o "Test suite run started at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ puts $o [berkdb version -string]
+ }
+ close $o
+ }
+
+ set test_list {
+ {"environment" "env"}
+ {"archive" "archive"}
+ {"backup" "backup"}
+ {"file operations" "fop"}
+ {"locking" "lock"}
+ {"logging" "log"}
+ {"memory pool" "memp"}
+ {"transaction" "txn"}
+ {"deadlock detection" "dead"}
+ {"subdatabase" "sdb"}
+ {"byte-order" "byte"}
+ {"recno backing file" "rsrc"}
+ {"DBM interface" "dbm"}
+ {"NDBM interface" "ndbm"}
+ {"Hsearch interface" "hsearch"}
+ {"secondary index" "sindex"}
+ {"partition" "partition"}
+ {"compression" "compressed"}
+ {"replication manager" "repmgr"}
+ }
+
+ # If this is run_std only, run each rep test for a single
+ # access method. If run_all, run for all access methods.
+ if { $std_only == 1 } {
+ lappend test_list {"replication" "rep_subset"}
+ } else {
+ lappend test_list {"replication" "rep_complete"}
+ }
+
+ # If release supports encryption, run security tests.
+ if { $has_crypto == 1 } {
+ lappend test_list {"security" "sec"}
+ }
+
+ if { $am_only == 0 } {
+ foreach pair $test_list {
+ set msg [lindex $pair 0]
+ set cmd [lindex $pair 1]
+ puts "Running $msg tests"
+ if [catch {exec $tclsh_path << \
+ "global one_test; set one_test $one_test; \
+ source $test_path/test.tcl; r $rflags $cmd" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $cmd test: $res"
+ close $o
+ }
+ }
+
+ # Run recovery tests.
+ #
+ # XXX These too are broken into separate tclsh instantiations
+ # so we don't require so much memory, but I think it's cleaner
+ # and more useful to do it down inside proc r than here,
+ # since "r recd" gets done a lot and needs to work.
+ #
+ # Note that we still wrap the test in an exec so that
+ # its output goes to ALL.OUT. run_recd will wrap each test
+ # so that both error streams go to stdout (which here goes
+ # to ALL.OUT); information that run_recd wishes to print
+ # to the "real" stderr, but outside the wrapping for each test,
+ # such as which tests are being skipped, it can still send to
+ # stderr.
+ puts "Running recovery tests"
+ if [catch {
+ exec $tclsh_path << \
+ "global one_test; set one_test $one_test; \
+ source $test_path/test.tcl; r $rflags recd" \
+ 2>@ stderr >> ALL.OUT
+ } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: recd tests: $res"
+ close $o
+ }
+
+ # Run join test
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so we don't
+ # require so much memory.
+ if { $one_test == "ALL" } {
+ puts "Running join test"
+ foreach test "join1 join2 join3 join4 join5 join6" {
+ if [catch {exec $tclsh_path << \
+ "source $test_path/test.tcl; r $rflags $test" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $test test: $res"
+ close $o
+ }
+ }
+ }
+ }
+
+ if { $no_am == 0 } {
+ # Access method tests.
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so we don't
+ # require so much memory.
+ foreach method $valid_methods {
+ puts "Running $method tests"
+ foreach test $test_names(test) {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ run_method \
+ -$method $test $display $run $o
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path << \
+ "global one_test; \
+ set one_test $one_test; \
+ source $test_path/test.tcl; \
+ run_method \
+ -$method $test $display $run"\
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL:$test $method: $res"
+ close $o
+ }
+ }
+ }
+ }
+ }
+
+ # If not actually running, no need to check for failure.
+ # If running in the context of the larger 'run_all' we don't
+ # check for failure here either.
+ if { $run == 0 || $std_only == 0 } {
+ return
+ }
+
+ set failed [check_output ALL.OUT]
+
+ set o [open ALL.OUT a]
+ if { $failed == 0 } {
+ puts "Regression Tests Succeeded"
+ puts $o "Regression Tests Succeeded"
+ } else {
+ puts "Regression Tests Failed"
+ puts "Check UNEXPECTED OUTPUT lines."
+ puts "Review ALL.OUT.x for details."
+ puts $o "Regression Tests Failed"
+ }
+
+ puts -nonewline "Test suite run completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts -nonewline $o "Test suite run completed at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ close $o
+}
+
+proc check_output { file } {
+ # These are all the acceptable patterns.
+ set pattern {(?x)
+ ^[:space:]*$|
+ .*?wrap\.tcl.*|
+ .*?dbscript\.tcl.*|
+ .*?ddscript\.tcl.*|
+ .*?mpoolscript\.tcl.*|
+ ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)$|
+ ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\sCrashing$|
+ ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\s[p|P]rocesses\srunning:.*|
+ ^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\s5\sprocesses\srunning.*|
+ ^\d:\sPut\s\d*\sstrings\srandom\soffsets.*|
+ ^100.*|
+ ^eval\s.*|
+ ^exec\s.*|
+ ^fileops:\s.*|
+ ^jointest.*$|
+ ^r\sarchive\s*|
+ ^r\sbackup\s*|
+ ^r\sdbm\s*|
+ ^r\shsearch\s*|
+ ^r\sndbm\s*|
+ ^r\srpc\s*|
+ ^run_recd:\s.*|
+ ^run_reptest\s.*|
+ ^run_rpcmethod:\s.*|
+ ^run_secenv:\s.*|
+ ^All\sprocesses\shave\sexited.$|
+ ^Backuptest\s.*|
+ ^Beginning\scycle\s\d$|
+ ^Byteorder:.*|
+ ^Child\sruns\scomplete\.\s\sParent\smodifies\sdata\.$|
+ ^Deadlock\sdetector:\s\d*\sCheckpoint\sdaemon\s\d*$|
+ ^Ending\srecord.*|
+ ^Environment\s.*?specified;\s\sskipping\.$|
+ ^Executing\srecord\s.*|
+ ^Freeing\smutex\s.*|
+ ^Join\stest:\.*|
+ ^Method:\s.*|
+ ^Putting\s.*databases.*|
+ ^Repl:\stest\d\d\d:.*|
+ ^Repl:\ssdb\d\d\d:.*|
+ ^Running\stest\ssdb.*|
+ ^Running\stest\stest.*|
+ ^run_inmem_db\s.*rep.*|
+ ^run_inmem_log\s.*rep.*|
+ ^run_mixedmode_log\s.*rep.*|
+ ^Script\swatcher\sprocess\s.*|
+ ^Secondary\sindex\sjoin\s.*|
+ ^Berkeley\sDB\s.*|
+ ^Test\ssuite\srun\s.*|
+ ^Test\s.*rep.*|
+ ^Unlinking\slog:\serror\smessage\sOK$|
+ ^Verifying\s.*|
+ ^\t*\.\.\.dbc->get.*$|
+ ^\t*\.\.\.dbc->put.*$|
+ ^\t*\.\.\.key\s\d.*$|
+ ^\t*\.\.\.Skipping\sdbc.*|
+ ^\t*and\s\d*\sduplicate\sduplicates\.$|
+ ^\t*About\sto\srun\srecovery\s.*complete$|
+ ^\t*Add\sa\sthird\sversion\s.*|
+ ^\t*Archive[:\.].*|
+ ^\t*Backuptest.*|
+ ^\t*Bigfile[0-9][0-9][0-9].*|
+ ^\t*Building\s.*|
+ ^\t*closing\ssecondaries\.$|
+ ^\t*Command\sexecuted\sand\s.*$|
+ ^\t*DBM.*|
+ ^\t*[d|D]ead[0-9][0-9][0-9].*|
+ ^\t*Dump\/load\sof.*|
+ ^\t*[e|E]nv[0-9][0-9][0-9].*|
+ ^\t*Executing\scommand$|
+ ^\t*Executing\stxn_.*|
+ ^\t*File\srecd005\.\d\.db\sexecuted\sand\saborted\.$|
+ ^\t*File\srecd005\.\d\.db\sexecuted\sand\scommitted\.$|
+ ^\t*[f|F]op[0-9][0-9][0-9].*|
+ ^\t*HSEARCH.*|
+ ^\t*Initial\sCheckpoint$|
+ ^\t*Iteration\s\d*:\sCheckpointing\.$|
+ ^\t*Joining:\s.*|
+ ^\t*Kid[1|2]\sabort\.\.\.complete$|
+ ^\t*Kid[1|2]\scommit\.\.\.complete$|
+ ^\t*[l|L]ock[0-9][0-9][0-9].*|
+ ^\t*[l|L]og[0-9][0-9][0-9].*|
+ ^\t*[m|M]emp[0-9][0-9][0-9].*|
+ ^\t*[m|M]utex[0-9][0-9][0-9].*|
+ ^\t*NDBM.*|
+ ^\t*opening\ssecondaries\.$|
+ ^\t*op_recover_rec:\sRunning\srecovery.*|
+ ^\t*[r|R]ecd[0-9][0-9][0-9].*|
+ ^\t*[r|R]ep[0-9][0-9][0-9].*|
+ ^\t*[r|R]epmgr[0-9][0-9][0-9].*|
+ ^\t*[r|R]ep_test.*|
+ ^\t*[r|R]pc[0-9][0-9][0-9].*|
+ ^\t*[r|R]src[0-9][0-9][0-9].*|
+ ^\t*Recover\sfrom\sfirst\sdatabase$|
+ ^\t*Recover\sfrom\ssecond\sdatabase$|
+ ^\t*Remove\ssecond\sdb$|
+ ^\t*Rep_verify.*|
+ ^\t*Run_rpcmethod.*|
+ ^\t*Running\srecovery\son\s.*|
+ ^\t*[s|S]ec[0-9][0-9][0-9].*|
+ ^\t*[s|S]i[0-9][0-9][0-9].*|
+ ^\t*[s|S]ijoin.*|
+ ^\t*Salvage\stests\sof.*|
+ ^\t*sdb[0-9][0-9][0-9].*|
+ ^\t*Skipping\s.*|
+ ^\t*Subdb[0-9][0-9][0-9].*|
+ ^\t*Subdbtest[0-9][0-9][0-9].*|
+ ^\t*Syncing$|
+ ^\t*[t|T]est[0-9][0-9][0-9].*|
+ ^\t*[t|T]xn[0-9][0-9][0-9].*|
+ ^\t*Txnscript.*|
+ ^\t*Using\s.*?\senvironment\.$|
+ ^\t*Verification\sof.*|
+ ^\t*with\stransactions$}
+
+ set failed 0
+ set f [open $file r]
+ while { [gets $f line] >= 0 } {
+ if { [regexp $pattern $line] == 0 } {
+ puts -nonewline "UNEXPECTED OUTPUT: "
+ puts $line
+ set failed 1
+ }
+ }
+ close $f
+ return $failed
+}
+
+proc r { args } {
+ global test_names
+ global has_crypto
+ global rand_init
+ global one_test
+ global test_recopts
+ global checking_valid_methods
+
+ source ./include.tcl
+
+ set exflgs [eval extractflags $args]
+ set args [lindex $exflgs 0]
+ set flags [lindex $exflgs 1]
+
+ set display 1
+ set run 1
+ set saveflags "--"
+ foreach f $flags {
+ switch $f {
+ n {
+ set display 1
+ set run 0
+ set saveflags "-n $saveflags"
+ }
+ }
+ }
+
+ if {[catch {
+ set sub [ lindex $args 0 ]
+ set starttest [lindex $args 1]
+ switch $sub {
+ bigfile -
+ dead -
+ env -
+ lock -
+ log -
+ memp -
+ multi_repmgr -
+ mutex -
+ rsrc -
+ sdbtest -
+ txn {
+ if { $display } {
+ run_subsystem $sub 1 0
+ }
+ if { $run } {
+ run_subsystem $sub
+ }
+ }
+ byte {
+ if { $one_test == "ALL" } {
+ run_test byteorder $display $run
+ }
+ }
+ archive -
+ backup -
+ dbm -
+ hsearch -
+ ndbm -
+ shelltest {
+ if { $one_test == "ALL" } {
+ if { $display } { puts "eval $sub" }
+ if { $run } {
+ check_handles
+ eval $sub
+ }
+ }
+ }
+ compact -
+ elect -
+ inmemdb -
+ init -
+ fop {
+ foreach test $test_names($sub) {
+ eval run_test $test $display $run
+ }
+ }
+ compressed {
+ set tindex [lsearch $test_names(test) $starttest]
+ if { $tindex == -1 } {
+ set tindex 0
+ }
+ set clist [lrange $test_names(test) $tindex end]
+ foreach test $clist {
+ eval run_compressed btree $test $display $run
+ }
+ }
+ join {
+ eval r $saveflags join1
+ eval r $saveflags join2
+ eval r $saveflags join3
+ eval r $saveflags join4
+ eval r $saveflags join5
+ eval r $saveflags join6
+ }
+ join1 {
+ if { $display } { puts "eval jointest" }
+ if { $run } {
+ check_handles
+ eval jointest
+ }
+ }
+ joinbench {
+ puts "[timestamp]"
+ eval r $saveflags join1
+ eval r $saveflags join2
+ puts "[timestamp]"
+ }
+ join2 {
+ if { $display } { puts "eval jointest 512" }
+ if { $run } {
+ check_handles
+ eval jointest 512
+ }
+ }
+ join3 {
+ if { $display } {
+ puts "eval jointest 8192 0 -join_item"
+ }
+ if { $run } {
+ check_handles
+ eval jointest 8192 0 -join_item
+ }
+ }
+ join4 {
+ if { $display } { puts "eval jointest 8192 2" }
+ if { $run } {
+ check_handles
+ eval jointest 8192 2
+ }
+ }
+ join5 {
+ if { $display } { puts "eval jointest 8192 3" }
+ if { $run } {
+ check_handles
+ eval jointest 8192 3
+ }
+ }
+ join6 {
+ if { $display } { puts "eval jointest 512 3" }
+ if { $run } {
+ check_handles
+ eval jointest 512 3
+ }
+ }
+ partition {
+ foreach method { btree hash } {
+ foreach test "$test_names(recd)\
+ $test_names(test)" {
+ run_range_partition\
+ $test $method $display $run
+ run_partition_callback\
+ $test $method $display $run
+ }
+ }
+ }
+ recd {
+ check_handles
+ eval {run_recds all $run $display} [lrange $args 1 end]
+ }
+ repmgr {
+ set tindex [lsearch $test_names(repmgr) $starttest]
+ if { $tindex == -1 } {
+ set tindex 0
+ }
+ set rlist [lrange $test_names(repmgr) $tindex end]
+ foreach test $rlist {
+ run_test $test $display $run
+ }
+ }
+ rep {
+ r rep_subset $starttest
+ }
+		rep_subset {
+			# To run a subset of the complete rep tests, use
+			# rep_subset, which randomly picks an access type to
+			# use, and randomly picks whether to open envs with
+			# the -recover flag.
+ if { [is_partition_callback $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+ berkdb srand $rand_init
+ set tindex [lsearch $test_names(rep) $starttest]
+ if { $tindex == -1 } {
+ set tindex 0
+ }
+ set rlist [lrange $test_names(rep) $tindex end]
+ foreach test $rlist {
+ set random_recopt \
+ [berkdb random_int 0 1]
+ if { $random_recopt == 1 } {
+ set test_recopts "-recover"
+ } else {
+ set test_recopts {""}
+ }
+
+ set method_list \
+ [find_valid_methods $test]
+ set list_length \
+ [expr [llength $method_list] - 1]
+ set method_index \
+ [berkdb random_int 0 $list_length]
+ set rand_method \
+ [lindex $method_list $method_index]
+
+ if { $display } {
+ puts "eval $test $rand_method; \
+ verify_dir \
+ $testdir \"\" 1 0 $nodump; \
+ salvage_dir $testdir"
+ }
+ if { $run } {
+ check_handles
+ eval $test $rand_method
+ verify_dir $testdir "" 1 0 $nodump
+ salvage_dir $testdir
+ }
+ }
+ if { $one_test == "ALL" } {
+ if { $display } {
+ #puts "basic_db_reptest"
+ #puts "basic_db_reptest 1"
+ }
+ if { $run } {
+ #basic_db_reptest
+ #basic_db_reptest 1
+ }
+ }
+ set test_recopts { "-recover" "" }
+ }
+ rep_complete {
+ set tindex [lsearch $test_names(rep) $starttest]
+ if { $tindex == -1 } {
+ set tindex 0
+ }
+ set rlist [lrange $test_names(rep) $tindex end]
+ foreach test $rlist {
+ run_test $test $display $run
+ }
+ if { $one_test == "ALL" } {
+ if { $display } {
+ #puts "basic_db_reptest"
+ #puts "basic_db_reptest 1"
+ }
+ if { $run } {
+ #basic_db_reptest
+ #basic_db_reptest 1
+ }
+ }
+ }
+ repmethod {
+ # We seed the random number generator here
+ # instead of in run_repmethod so that we
+ # aren't always reusing the first few
+ # responses from random_int.
+ #
+ berkdb srand $rand_init
+ foreach sub { test sdb } {
+ foreach test $test_names($sub) {
+ eval run_test run_repmethod \
+ $display $run $test
+ }
+ }
+ }
+ rpc {
+ if { $one_test == "ALL" } {
+ if { $display } { puts "r $sub" }
+ global BAT EXE rpc_svc svc_list
+ global rpc_svc svc_list is_je_test
+ set old_rpc_src $rpc_svc
+ foreach rpc_svc $svc_list {
+ if { $rpc_svc == "berkeley_dbje_svc" } {
+ set old_util_path $util_path
+ set util_path $je_root/dist
+ set is_je_test 1
+ }
+
+ if { !$run || \
+ ![file exist $util_path/$rpc_svc$BAT] || \
+ ![file exist $util_path/$rpc_svc$EXE] } {
+ continue
+ }
+
+ run_subsystem rpc
+ if { [catch {run_rpcmethod -txn} ret] != 0 } {
+ puts $ret
+ }
+
+ if { $is_je_test } {
+ check_handles
+ eval run_rpcmethod -btree
+ verify_dir $testdir "" 1
+ salvage_dir $testdir
+ } else {
+ run_test run_rpcmethod $display $run
+ }
+
+ if { $is_je_test } {
+ set util_path $old_util_path
+ set is_je_test 0
+ }
+
+ }
+ set rpc_svc $old_rpc_src
+ }
+ }
+ sec {
+ # Skip secure mode tests if release
+ # does not support encryption.
+ if { $has_crypto == 0 } {
+ return
+ }
+ if { $display } {
+ run_subsystem $sub 1 0
+ }
+ if { $run } {
+ run_subsystem $sub 0 1
+ }
+ }
+ secmethod {
+ # Skip secure mode tests if release
+ # does not support encryption.
+ if { $has_crypto == 0 } {
+ return
+ }
+ foreach test $test_names(test) {
+ eval run_test run_secmethod \
+ $display $run $test
+ eval run_test run_secenv \
+ $display $run $test
+ }
+ }
+ sdb {
+ if { $one_test == "ALL" } {
+ if { $display } {
+ run_subsystem sdbtest 1 0
+ }
+ if { $run } {
+ run_subsystem sdbtest 0 1
+ }
+ }
+ foreach test $test_names(sdb) {
+ eval run_test $test $display $run
+ }
+ }
+ sindex {
+ if { $one_test == "ALL" } {
+ if { $display } {
+ sindex 1 0
+ sijoin 1 0
+ }
+ if { $run } {
+ sindex 0 1
+ sijoin 0 1
+ }
+ }
+ }
+ btree -
+ rbtree -
+ hash -
+ iqueue -
+ iqueueext -
+ queue -
+ queueext -
+ recno -
+ frecno -
+ rrecno {
+ foreach test $test_names(test) {
+ eval run_method [lindex $args 0] $test \
+ $display $run stdout [lrange $args 1 end]
+ }
+ }
+
+ default {
+ error \
+ "FAIL:[timestamp] r: $args: unknown command"
+ }
+ }
+ flush stdout
+ flush stderr
+ } res] != 0} {
+ global errorInfo;
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp] r: $args: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
+
+proc run_subsystem { sub { display 0 } { run 1} } {
+ global test_names
+
+ if { [info exists test_names($sub)] != 1 } {
+ puts stderr "Subsystem $sub has no tests specified in\
+ testparams.tcl; skipping."
+ return
+ }
+ foreach test $test_names($sub) {
+ if { $display } {
+ puts "eval $test"
+ }
+ if { $run } {
+ check_handles
+ if {[catch {eval $test} ret] != 0 } {
+ puts "FAIL: run_subsystem: $sub $test: \
+ $ret"
+ }
+ }
+ }
+}
+
+proc run_test { test {display 0} {run 1} args } {
+ source ./include.tcl
+ global valid_methods
+
+ foreach method $valid_methods {
+ if { $display } {
+ puts "eval $test -$method $args; \
+ verify_dir $testdir \"\" 1; \
+ salvage_dir $testdir"
+ }
+ if { [is_partition_callback $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+ if { $run } {
+ check_handles
+ eval {$test -$method} $args
+ verify_dir $testdir "" 1 0 $nodump
+ salvage_dir $testdir
+ }
+ }
+}
+
+proc run_method { method test {display 0} {run 1} \
+ { outfile stdout } args } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global test_names
+ global parms
+ source ./include.tcl
+
+ if { [is_partition_callback $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+
+ if {[catch {
+ if { $display } {
+ puts -nonewline $outfile "eval \{ $test \} $method"
+ puts -nonewline $outfile " $parms($test) { $args }"
+ puts -nonewline $outfile " ; verify_dir $testdir \"\" 1 0 $nodump"
+ puts $outfile " ; salvage_dir $testdir"
+ }
+ if { $run } {
+ check_handles $outfile
+ puts $outfile "[timestamp]"
+ eval {$test} $method $parms($test) $args
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ # Verify all databases the test leaves behind
+ verify_dir $testdir "" 1 0 $nodump
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ salvage_dir $testdir
+ }
+ flush stdout
+ flush stderr
+ } res] != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_method: $method $test: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
+
+proc run_rpcmethod { method {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global rpc_tests
+ global parms
+ global is_envmethod
+ global rpc_svc
+ source ./include.tcl
+
+ puts "run_rpcmethod: $method $largs using $rpc_svc"
+
+ set save_largs $largs
+ set dpid [rpc_server_start]
+ puts "\tRun_rpcmethod.a: started server, pid $dpid"
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+
+ set home [file tail $rpc_testdir]
+
+ set is_envmethod 1
+ set use_txn 0
+ if { [string first "txn" $method] != -1 } {
+ set use_txn 1
+ }
+ if { $use_txn == 1 } {
+ set ntxns 32
+ set i 1
+ check_handles
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000} -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set stat [catch {eval txn001_suba $ntxns $env} res]
+ if { $stat == 0 } {
+ set stat [catch {eval txn001_subb $ntxns $env} res]
+ }
+ set stat [catch {eval txn003} res]
+ error_check_good envclose [$env close] 0
+ } else {
+ foreach test $rpc_tests($rpc_svc) {
+ set stat [catch {
+ check_handles
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ #
+ # Set server cachesize to 128Mb. Otherwise
+ # some tests won't fit (like test084 -btree).
+ #
+ set env [eval {berkdb_env -create -mode 0644 \
+ -home $home -server $rpc_server \
+ -client_timeout 10000 \
+ -cachesize {0 134217728 1}}]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ set largs $save_largs
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ puts "Running test $test with RPC service $rpc_svc"
+ puts "eval $test $method $parms($test) $largs"
+ eval $test $method $parms($test) $largs
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ error_check_good envclose [$env close] 0
+ set env ""
+ } res]
+
+ if { $stat != 0} {
+ global errorInfo;
+
+ puts "$res"
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ puts "FAIL:[timestamp]\
+ run_rpcmethod: $method $test: $errorInfo"
+ } else {
+ puts $theError;
+ }
+
+ catch { $env close } ignore
+ set env ""
+ tclkill $dpid
+ set dpid [rpc_server_start]
+ }
+ }
+ }
+ set is_envmethod 0
+ tclkill $dpid
+}
+
+proc run_rpcnoserver { method {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global test_names
+ global parms
+ global is_envmethod
+ source ./include.tcl
+
+ puts "run_rpcnoserver: $method $largs"
+
+ set save_largs $largs
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set home [file tail $rpc_testdir]
+
+ set is_envmethod 1
+ set use_txn 0
+ if { [string first "txn" $method] != -1 } {
+ set use_txn 1
+ }
+ if { $use_txn == 1 } {
+ set ntxns 32
+ set i 1
+ check_handles
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000} -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set stat [catch {eval txn001_suba $ntxns $env} res]
+ if { $stat == 0 } {
+ set stat [catch {eval txn001_subb $ntxns $env} res]
+ }
+ error_check_good envclose [$env close] 0
+ } else {
+ set stat [catch {
+ foreach test $test_names {
+ check_handles
+ if { [info exists parms($test)] != 1 } {
+ puts stderr "$test disabled in \
+ testparams.tcl; skipping."
+ continue
+ }
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ #
+ # Set server cachesize to 1Mb. Otherwise some
+ # tests won't fit (like test084 -btree).
+ #
+ set env [eval {berkdb_env -create -mode 0644 \
+ -home $home -server $rpc_server \
+ -client_timeout 10000 \
+ -cachesize {0 1048576 1} }]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ eval $test $method $parms($test) $largs
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ }
+ } res]
+ }
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_rpcnoserver: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ set is_envmethod 0
+ }
+
+}
+
+# Run a testNNN or recdNNN test with range partitioning.
+proc run_range_partition { test method {display 0} {run 1}\
+ {outfile stdout} args } {
+
+ # The only allowed access method for range partitioning is btree.
+ if { [is_btree $method] == 0 } {
+ if { $display == 0 } {
+ puts "Skipping range partition\
+ tests for method $method"
+ }
+ return
+ }
+
+ # If we've passed in explicit partitioning args, use them;
+ # otherwise set them. This particular selection hits some
+ # interesting cases where we set the key to "key".
+ set largs $args
+ if { [is_partitioned $args] == 0 } {
+ lappend largs -partition {ab cd key key1 zzz}
+ }
+
+ if { [string first recd $test] == 0 } {
+ eval {run_recd $method $test $run $display} $largs
+ } elseif { [string first test $test] == 0 } {
+ eval {run_method $method $test $display $run $outfile} $largs
+ } else {
+ puts "Skipping test $test with range partitioning."
+ }
+}
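+
+# Example usage (illustrative; btree is the only method accepted here):
+#	run_range_partition test001 btree
+# This runs test001 with the default "-partition {ab cd key key1 zzz}" spec.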
+
+# Run a testNNN or recdNNN test with partition callbacks.
+proc run_partition_callback { test method {display 0} {run 1}\
+ {outfile stdout} args } {
+
+ # The only allowed access methods are btree and hash.
+ if { [is_btree $method] == 0 && [is_hash $method] == 0 } {
+ if { $display == 0 } {
+ puts "Skipping partition callback tests\
+ for method $method"
+ }
+ return
+ }
+
+ # If we've passed in explicit partitioning args, use them;
+ # otherwise set them.
+ set largs $args
+ if { [is_partition_callback $args] == 0 } {
+ lappend largs -partition_callback 5 part
+ }
+
+ if { [string first recd $test] == 0 } {
+ eval {run_recd $method $test $run $display} $largs
+ } elseif { [string first test $test] == 0 } {
+ eval {run_method $method $test $display $run $outfile} $largs
+ } else {
+ puts "Skipping test $test with partition callbacks."
+ }
+}
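+
+# Example usage (illustrative; "part" is the partition callback assumed
+# to be defined elsewhere in the test suite):
+#	run_partition_callback test001 hash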
+
+#
+# Run method tests using compression (btree only).
+#
+proc run_compressed { method test {display 0} {run 1} \
+ { outfile stdout } args } {
+
+ if { [is_btree $method] == 0 } {
+ puts "Skipping compression test for method $method."
+ return
+ }
+
+ set largs $args
+ append largs " -compress "
+ eval run_method $method $test $display $run $outfile $largs
+}
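+
+# Example usage (illustrative; non-btree methods are skipped):
+#	run_compressed btree test001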
+
+#
+# Run method tests in secure mode.
+#
+proc run_secmethod { method test {display 0} {run 1} \
+ { outfile stdout } args } {
+ global passwd
+ global has_crypto
+
+ # Skip secure mode tests if release does not support encryption.
+ if { $has_crypto == 0 } {
+ return
+ }
+
+ set largs $args
+ append largs " -encryptaes $passwd "
+ eval run_method $method $test $display $run $outfile $largs
+}
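+
+# Example usage (illustrative; a no-op on builds without crypto support):
+#	run_secmethod btree test001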
+
+#
+# Run method tests each in its own, new secure environment.
+#
+proc run_secenv { method test {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global has_crypto
+ global test_names
+ global parms
+ global passwd
+ source ./include.tcl
+
+ # Skip secure mode tests if release does not support encryption.
+ if { $has_crypto == 0 } {
+ return
+ }
+
+ puts "run_secenv: $method $test $largs"
+
+ set save_largs $largs
+ env_cleanup $testdir
+ set is_envmethod 1
+ set stat [catch {
+ check_handles
+ set env [eval {berkdb_env -create -mode 0644 -home $testdir \
+ -encryptaes $passwd -pagesize 512 -cachesize {0 4194304 1}}]
+ error_check_good env_open [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ if { [info exists parms($test)] != 1 } {
+ puts stderr "$test disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+
+ #
+ # Run each test multiple times in the secure env.
+ # Once with a secure env + clear database
+ # Once with a secure env + secure database
+ #
+ eval $test $method $parms($test) $largs
+ append largs " -encrypt "
+ eval $test $method $parms($test) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir -encryptaes $passwd] 0
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_secenv: $method $test: $theError"
+ } else {
+ error $theError;
+ }
+ set is_envmethod 0
+ }
+
+}
+
+#
+# Run replication method tests in master and client env.
+#
+proc run_reptest { method test {droppct 0} {nclients 1} {do_del 0} \
+ {do_sec 0} {do_oob 0} {largs "" } } {
+ source ./include.tcl
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global parms
+ global passwd
+ global has_crypto
+
+ puts "run_reptest \
+ $method $test $droppct $nclients $do_del $do_sec $do_oob $largs"
+
+ env_cleanup $testdir
+ set is_envmethod 1
+ set stat [catch {
+ if { $do_sec && $has_crypto } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+
+ set largs [repl_envsetup \
+ $envargs $largs $test $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
+ if { [info exists parms($test)] != 1 } {
+ puts stderr "$test disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+
+ puts -nonewline \
+ "Repl: $test: dropping $droppct%, $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $test $method $parms($test) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $test $nclients $do_oob
+ repl_envver0 $test $method $nclients
+ if { $do_del } {
+ repl_verdel $test $method $nclients
+ }
+ repl_envclose $test $envargs
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_reptest: $method $test: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+}
+
+#
+# Run replication method tests in master and client env.
+#
+proc run_repmethod { method test {numcl 0} {display 0} {run 1} \
+ {outfile stdout} {largs ""} } {
+ source ./include.tcl
+ if { $is_windows9x_test == 1 } {
+ puts "Skipping replication test on Win 9x platform."
+ return
+ }
+
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global test_names
+ global parms
+ global has_crypto
+ global passwd
+
+ set save_largs $largs
+ env_cleanup $testdir
+
+	# Use a weighted list for the number of clients because we really
+	# don't want to weight all client counts evenly.  Favor smaller
+	# numbers but test more clients occasionally.
+ set drop_list { 0 0 0 0 0 1 1 5 5 10 20 }
+ set drop_len [expr [llength $drop_list] - 1]
+ set client_list { 1 1 2 1 1 1 2 2 3 1 }
+ set cl_len [expr [llength $client_list] - 1]
+
+ if { $numcl == 0 } {
+ set clindex [berkdb random_int 0 $cl_len]
+ set nclients [lindex $client_list $clindex]
+ } else {
+ set nclients $numcl
+ }
+ set drindex [berkdb random_int 0 $drop_len]
+ set droppct [lindex $drop_list $drindex]
+
+ # Do not drop messages on Windows. Since we can't set
+ # re-request times with less than millisecond precision,
+ # dropping messages will cause test failures.
+ if { $is_windows_test == 1 } {
+ set droppct 0
+ }
+
+ set do_sec [berkdb random_int 0 1]
+ set do_oob [berkdb random_int 0 1]
+ set do_del [berkdb random_int 0 1]
+
+ if { $display == 1 } {
+ puts $outfile "eval run_reptest $method $test $droppct \
+ $nclients $do_del $do_sec $do_oob $largs"
+ }
+ if { $run == 1 } {
+ run_reptest $method $test $droppct $nclients $do_del \
+ $do_sec $do_oob $largs
+ }
+}
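+
+# Example usage (illustrative): pick random drop/security/out-of-order
+# settings and run test001 replicated over btree with one client.
+#	run_repmethod btree test001 1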
+
+#
+# Run method tests, each in its own, new environment. (As opposed to
+# run_envmethod1 which runs all the tests in a single environment.)
+#
+proc run_envmethod { method test {display 0} {run 1} {outfile stdout} \
+ { largs "" } } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global test_names
+ global parms
+ source ./include.tcl
+
+ set save_largs $largs
+ set envargs ""
+
+ # Enlarge the logging region by default - sdb004 needs this because
+ # it uses very long subdb names, and the names are stored in the
+ # env region.
+ set logargs " -log_regionmax 2057152 "
+
+ # Enlarge the cache by default - some compaction tests need it.
+ set cacheargs "-cachesize {0 4194304 1} -pagesize 512"
+ env_cleanup $testdir
+
+ if { $display == 1 } {
+ puts $outfile "eval run_envmethod $method \
+ $test 0 1 stdout $largs"
+ }
+
+ # To run a normal test using system memory, call run_envmethod
+ # with the flag -shm.
+ set sindex [lsearch -exact $largs "-shm"]
+ if { $sindex >= 0 } {
+ if { [mem_chk " -system_mem -shm_key 1 "] == 1 } {
+ break
+ } else {
+ append envargs " -system_mem -shm_key 1 "
+ set largs [lreplace $largs $sindex $sindex]
+ }
+ }
+
+ set sindex [lsearch -exact $largs "-log_max"]
+ if { $sindex >= 0 } {
+ append envargs " -log_max 100000 "
+ set largs [lreplace $largs $sindex $sindex]
+ }
+
+ # Test for -thread option and pass to berkdb_env open. Leave in
+ # $largs because -thread can also be passed to an individual
+ # test as an arg. Double the number of lockers because a threaded
+ # env requires more than an ordinary env.
+ if { [lsearch -exact $largs "-thread"] != -1 } {
+ append envargs " -thread -lock_max_lockers 2000 "
+ }
+
+ # Test for -alloc option and pass to berkdb_env open only.
+ # Remove from largs because -alloc is not an allowed test arg.
+ set aindex [lsearch -exact $largs "-alloc"]
+ if { $aindex >= 0 } {
+ append envargs " -alloc "
+ set largs [lreplace $largs $aindex $aindex]
+ }
+
+ # We raise the number of locks and objects - there are a few
+ # compaction tests that require a large number.
+ set lockargs " -lock_max_locks 40000 -lock_max_objects 20000 "
+
+ if { $run == 1 } {
+ set is_envmethod 1
+ set stat [catch {
+ check_handles
+ set env [eval {berkdb_env -create -txn -mode 0644 \
+ -home $testdir} $logargs $cacheargs $lockargs $envargs]
+ error_check_good env_open [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ if { [info exists parms($test)] != 1 } {
+ puts stderr "$test disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ eval $test $method $parms($test) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+# error_check_good envremove [berkdb envremove \
+# -home $testdir] 0
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $test: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+ }
+}
+
+proc run_compact { method } {
+ source ./include.tcl
+ for {set tnum 111} {$tnum <= 115} {incr tnum} {
+ run_envmethod $method test$tnum 0 1 stdout -log_max
+
+ puts "\tTest$tnum: Test Recovery"
+ set env1 [eval berkdb env -create -txn \
+ -recover_fatal -home $testdir]
+ error_check_good env_close [$env1 close] 0
+ error_check_good verify_dir \
+ [verify_dir $testdir "" 0 0 1 ] 0
+ puts "\tTest$tnum: Remove db and test Recovery"
+ exec sh -c "rm -f $testdir/*.db"
+ set env1 [eval berkdb env -create -txn \
+ -recover_fatal -home $testdir]
+ error_check_good env_close [$env1 close] 0
+ error_check_good verify_dir \
+ [verify_dir $testdir "" 0 0 1 ] 0
+ }
+}
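+
+# Example usage (illustrative): run the compaction tests test111-test115
+# for one method, checking recovery after each.
+#	run_compact btree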
+
+proc run_recd { method test {run 1} {display 0} args } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global parms
+ global test_names
+ global log_log_record_types
+ global gen_upgrade_log
+ global upgrade_be
+ global upgrade_dir
+ global upgrade_method
+ global upgrade_name
+ source ./include.tcl
+
+ if { $run == 1 } {
+ puts "run_recd: $method $test $parms($test) $args"
+ }
+ if {[catch {
+ if { $display } {
+ puts "eval { $test } $method $parms($test) { $args }"
+ }
+ if { $run } {
+ check_handles
+ set upgrade_method $method
+ set upgrade_name $test
+ puts "[timestamp]"
+ # By redirecting stdout to stdout, we make exec
+ # print output rather than simply returning it.
+ # By redirecting stderr to stdout too, we make
+ # sure everything winds up in the ALL.OUT file.
+ set ret [catch { exec $tclsh_path << \
+ "source $test_path/test.tcl; \
+ set log_log_record_types $log_log_record_types;\
+ set gen_upgrade_log $gen_upgrade_log;\
+ set upgrade_be $upgrade_be; \
+ set upgrade_dir $upgrade_dir; \
+ set upgrade_method $upgrade_method; \
+ set upgrade_name $upgrade_name; \
+ eval { $test } $method $parms($test) {$args}" \
+ >&@ stdout
+ } res]
+
+ # Don't die if the test failed; we want
+ # to just proceed.
+ if { $ret != 0 } {
+ puts "FAIL:[timestamp] $res"
+ }
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ }
+ } res] != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_recd: $method: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
+
+proc recds {method args} {
+ eval {run_recds $method 1 0} $args
+}
+
+proc run_recds {{run_methods "all"} {run 1} {display 0} args } {
+ source ./include.tcl
+ global log_log_record_types
+ global test_names
+ global gen_upgrade_log
+ global encrypt
+ global valid_methods
+
+ set log_log_record_types 1
+ set run_zero 0
+ if { $run_methods == "all" } {
+ set run_methods $valid_methods
+ set run_zero 1
+ }
+ logtrack_init
+
+ # Define a small set of tests to run with log file zeroing.
+ set zero_log_tests \
+ {recd001 recd002 recd003 recd004 recd005 recd006 recd007}
+
+ foreach method $run_methods {
+ check_handles
+#set test_names(recd) "recd005 recd017"
+ foreach test $test_names(recd) {
+ # Skip recd017 for non-crypto upgrade testing.
+ # Run only recd017 for crypto upgrade testing.
+ if { $gen_upgrade_log == 1 && $test == "recd017" && \
+ $encrypt == 0 } {
+ puts "Skipping recd017 for non-crypto run."
+ continue
+ }
+ if { $gen_upgrade_log == 1 && $test != "recd017" && \
+ $encrypt == 1 } {
+ puts "Skipping $test for crypto run."
+ continue
+ }
+ if { [catch {eval {run_recd $method $test $run \
+ $display} $args} ret ] != 0 } {
+ puts $ret
+ }
+
+ # If it's one of the chosen tests, and btree, run with
+ # log file zeroing.
+ set zlog_idx [lsearch -exact $zero_log_tests $test]
+ if { $run_zero == 1 && \
+ $method == "btree" && $zlog_idx > -1 } {
+ if { [catch {eval {run_recd $method $test \
+ $run $display -zero_log} $args} ret ] != 0 } {
+ puts $ret
+ }
+ }
+
+ if { $gen_upgrade_log == 1 } {
+ save_upgrade_files $testdir
+ }
+ }
+ }
+
+ # We can skip logtrack_summary during the crypto upgrade run -
+ # it doesn't introduce any new log types.
+ if { $run } {
+ if { $gen_upgrade_log == 0 || $encrypt == 0 } {
+ logtrack_summary
+ }
+ }
+ set log_log_record_types 0
+}
+
+# A small subset of tests to be used in conjunction with the
+# automated builds. Ideally these tests will cover a lot of ground
+# but run in only 15 minutes or so.  Any test added to the list will be
+# run in all the ways that run_all runs it.
+proc run_smoke { } {
+ source ./include.tcl
+ global valid_methods
+
+ fileremove -f SMOKE.OUT
+
+ set smoke_tests { \
+ lock001 log001 test001 test004 sdb001 sec001 rep001 txn001 }
+
+ # Run each test in all its permutations, and
+ # concatenate the results in the file SMOKE.OUT.
+ foreach test $smoke_tests {
+ run_all $test
+ set in [open ALL.OUT r]
+ set out [open SMOKE.OUT a]
+ while { [gets $in str] != -1 } {
+ puts $out $str
+ }
+ close $in
+ close $out
+ }
+}
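+
+# Example usage (illustrative): run the smoke tests and collect the
+# combined output in SMOKE.OUT.
+#	run_smoke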
+
+proc run_all { { testname ALL } args } {
+ global test_names
+ global one_test
+ global has_crypto
+ global valid_methods
+ source ./include.tcl
+
+ fileremove -f ALL.OUT
+
+ set one_test $testname
+ if { $one_test != "ALL" } {
+ # Source testparams again to adjust test_names.
+ source $test_path/testparams.tcl
+ }
+
+ set exflgs [eval extractflags $args]
+ set flags [lindex $exflgs 1]
+ set display 1
+ set run 1
+ set am_only 0
+ set parallel 0
+ set nparalleltests 0
+ set rflags {--}
+ foreach f $flags {
+ switch $f {
+ m {
+ set am_only 1
+ }
+ n {
+ set display 1
+ set run 0
+ set rflags [linsert $rflags 0 "-n"]
+ }
+ }
+ }
+
+ set o [open ALL.OUT a]
+ if { $run == 1 } {
+ puts -nonewline "Test suite run started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts [berkdb version -string]
+
+ puts -nonewline $o "Test suite run started at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ puts $o [berkdb version -string]
+ }
+ close $o
+ #
+ # First run standard tests. Send in a -A to let run_std know
+ # that it is part of the "run_all" run, so that it doesn't
+ # print out start/end times.
+ #
+ lappend args -A
+ eval {run_std} $one_test $args
+
+ set test_pagesizes [get_test_pagesizes]
+ set args [lindex $exflgs 0]
+ set save_args $args
+
+ foreach pgsz $test_pagesizes {
+ set args $save_args
+ append args " -pagesize $pgsz -chksum"
+ if { $am_only == 0 } {
+ # Run recovery tests.
+ #
+ # XXX These don't actually work at multiple pagesizes;
+ # disable them for now.
+ #
+ # XXX These too are broken into separate tclsh
+ # instantiations so we don't require so much
+ # memory, but I think it's cleaner
+ # and more useful to do it down inside proc r than here,
+ # since "r recd" gets done a lot and needs to work.
+ #
+ # XXX See comment in run_std for why this only directs
+ # stdout and not stderr. Don't worry--the right stuff
+ # happens.
+ #puts "Running recovery tests with pagesize $pgsz"
+ #if [catch {exec $tclsh_path \
+ # << "source $test_path/test.tcl; \
+ # r $rflags recd $args" \
+ # 2>@ stderr >> ALL.OUT } res] {
+ # set o [open ALL.OUT a]
+ # puts $o "FAIL: recd test:"
+ # puts $o $res
+ # close $o
+ #}
+ }
+
+ # Access method tests.
+ # Run subdb tests with varying pagesizes too.
+ # XXX
+ # Broken up into separate tclsh instantiations so
+ # we don't require so much memory.
+ foreach method $valid_methods {
+ puts "Running $method tests with pagesize $pgsz"
+ foreach sub {test sdb si} {
+ foreach test $test_names($sub) {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ eval {run_method -$method \
+ $test $display $run $o} \
+ $args
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path << \
+ "global one_test; \
+ set one_test $one_test; \
+ source $test_path/test.tcl; \
+ eval {run_method -$method \
+ $test $display $run \
+ stdout} $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: \
+ -$method $test: $res"
+ close $o
+ }
+ }
+ }
+ }
+ }
+ }
+ set args $save_args
+ #
+ # Run access method tests at default page size in one env.
+ #
+ foreach method $valid_methods {
+ puts "Running $method tests in a txn env"
+ foreach sub {test sdb si} {
+ foreach test $test_names($sub) {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ run_envmethod -$method $test $display \
+ $run $o $args
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path << \
+ "global one_test; \
+ set one_test $one_test; \
+ source $test_path/test.tcl; \
+ run_envmethod -$method $test \
+ $display $run stdout $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: run_envmethod \
+ $method $test: $res"
+ close $o
+ }
+ }
+ }
+ }
+ }
+ #
+ # Run access method tests at default page size in thread-enabled env.
+ # We're not truly running threaded tests, just testing the interface.
+ #
+ foreach method $valid_methods {
+ puts "Running $method tests in a threaded txn env"
+ foreach sub {test sdb si} {
+ foreach test $test_names($sub) {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ eval {run_envmethod -$method $test \
+ $display $run $o -thread}
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path << \
+ "global one_test; \
+ set one_test $one_test; \
+ source $test_path/test.tcl; \
+ eval {run_envmethod -$method $test \
+ $display $run stdout -thread}" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: run_envmethod \
+ $method $test -thread: $res"
+ close $o
+ }
+ }
+ }
+ }
+ }
+ #
+ # Run access method tests at default page size with -alloc enabled.
+ #
+ foreach method $valid_methods {
+ puts "Running $method tests in an env with -alloc"
+ foreach sub {test sdb si} {
+ foreach test $test_names($sub) {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ eval {run_envmethod -$method $test \
+ $display $run $o -alloc}
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path << \
+ "global one_test; \
+ set one_test $one_test; \
+ source $test_path/test.tcl; \
+ eval {run_envmethod -$method $test \
+ $display $run stdout -alloc}" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: run_envmethod \
+ $method $test -alloc: $res"
+ close $o
+ }
+ }
+ }
+ }
+ }
+
+ # Run standard access method tests under replication.
+ #
+ set test_list [list {"testNNN under replication" "repmethod"}]
+
+ # If we're on Windows, Linux, FreeBSD, or Solaris, run the
+ # bigfile tests. These create files larger than 4 GB.
+ if { $is_freebsd_test == 1 || $is_linux_test == 1 || \
+ $is_sunos_test == 1 || $is_windows_test == 1 } {
+ lappend test_list {"big files" "bigfile"}
+ }
+
+ # If release supports encryption, run security tests.
+ #
+ if { $has_crypto == 1 } {
+ lappend test_list {"testNNN with security" "secmethod"}
+ }
+ #
+ # If configured for RPC, then run rpc tests too.
+ #
+ if { [file exists ./berkeley_db_svc] ||
+ [file exists ./berkeley_db_cxxsvc] ||
+ [file exists ./berkeley_db_javasvc] } {
+ lappend test_list {"RPC" "rpc"}
+ }
+
+ foreach pair $test_list {
+ set msg [lindex $pair 0]
+ set cmd [lindex $pair 1]
+ puts "Running $msg tests"
+ if [catch {exec $tclsh_path << \
+ "global one_test; set one_test $one_test; \
+ source $test_path/test.tcl; \
+ r $rflags $cmd $args" >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $cmd test: $res"
+ close $o
+ }
+ }
+
+ # If not actually running, no need to check for failure.
+ if { $run == 0 } {
+ return
+ }
+
+ set failed 0
+ set o [open ALL.OUT r]
+ while { [gets $o line] >= 0 } {
+ if { [regexp {^FAIL} $line] != 0 } {
+ set failed 1
+ }
+ }
+ close $o
+ set o [open ALL.OUT a]
+ if { $failed == 0 } {
+ puts "Regression Tests Succeeded"
+ puts $o "Regression Tests Succeeded"
+ } else {
+ puts "Regression Tests Failed; see ALL.OUT for log"
+ puts $o "Regression Tests Failed"
+ }
+
+ puts -nonewline "Test suite run completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts -nonewline $o "Test suite run completed at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ close $o
+}
+
+proc run_all_new { { testname ALL } args } {
+ global test_names
+ global one_test
+ global has_crypto
+ global valid_methods
+ source ./include.tcl
+
+ fileremove -f ALL.OUT
+
+ set one_test $testname
+ if { $one_test != "ALL" } {
+ # Source testparams again to adjust test_names.
+ source $test_path/testparams.tcl
+ }
+
+ set exflgs [eval extractflags $args]
+ set flags [lindex $exflgs 1]
+ set display 1
+ set run 1
+ set am_only 0
+ set parallel 0
+ set nparalleltests 0
+ set rflags {--}
+ foreach f $flags {
+ switch $f {
+ m {
+ set am_only 1
+ }
+ n {
+ set display 1
+ set run 0
+ set rflags [linsert $rflags 0 "-n"]
+ }
+ }
+ }
+
+ set o [open ALL.OUT a]
+ if { $run == 1 } {
+ puts -nonewline "Test suite run started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts [berkdb version -string]
+
+ puts -nonewline $o "Test suite run started at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ puts $o [berkdb version -string]
+ }
+ close $o
+ #
+ # First run standard tests. Send in a -A to let run_std know
+ # that it is part of the "run_all" run, so that it doesn't
+ # print out start/end times.
+ #
+ lappend args -A
+ eval {run_std} $one_test $args
+
+ set test_pagesizes [get_test_pagesizes]
+ set args [lindex $exflgs 0]
+ set save_args $args
+
+ #
+ # Run access method tests at default page size in one env.
+ #
+ foreach method $valid_methods {
+ puts "Running $method tests in a txn env"
+ foreach sub {test sdb si} {
+ foreach test $test_names($sub) {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ run_envmethod -$method $test $display \
+ $run $o $args
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path << \
+ "global one_test; \
+ set one_test $one_test; \
+ source $test_path/test.tcl; \
+ run_envmethod -$method $test \
+ $display $run stdout $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: run_envmethod \
+ $method $test: $res"
+ close $o
+ }
+ }
+ }
+ }
+ }
+ #
+ # Run access method tests at default page size in thread-enabled env.
+ # We're not truly running threaded tests, just testing the interface.
+ #
+ foreach method $valid_methods {
+ puts "Running $method tests in a threaded txn env"
+ set thread_tests "test001"
+ foreach test $thread_tests {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ eval {run_envmethod -$method $test \
+ $display $run $o -thread}
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path << \
+ "global one_test; \
+ set one_test $one_test; \
+ source $test_path/test.tcl; \
+ eval {run_envmethod -$method $test \
+ $display $run stdout -thread}" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: run_envmethod \
+ $method $test -thread: $res"
+ close $o
+ }
+ }
+ }
+ }
+ #
+ # Run access method tests at default page size with -alloc enabled.
+ #
+ foreach method $valid_methods {
+ puts "Running $method tests in an env with -alloc"
+ set alloc_tests "test001"
+ foreach test $alloc_tests {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ eval {run_envmethod -$method $test \
+ $display $run $o -alloc}
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path << \
+ "global one_test; \
+ set one_test $one_test; \
+ source $test_path/test.tcl; \
+ eval {run_envmethod -$method $test \
+ $display $run stdout -alloc}" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: run_envmethod \
+ $method $test -alloc: $res"
+ close $o
+ }
+ }
+ }
+ }
+
+ # Run standard access method tests under replication.
+ #
+ set test_list [list {"testNNN under replication" "repmethod"}]
+
+ # If we're on Windows, Linux, FreeBSD, or Solaris, run the
+ # bigfile tests. These create files larger than 4 GB.
+ if { $is_freebsd_test == 1 || $is_linux_test == 1 || \
+ $is_sunos_test == 1 || $is_windows_test == 1 } {
+ lappend test_list {"big files" "bigfile"}
+ }
+
+ # If release supports encryption, run security tests.
+ #
+ if { $has_crypto == 1 } {
+ lappend test_list {"testNNN with security" "secmethod"}
+ }
+ #
+ # If configured for RPC, then run rpc tests too.
+ #
+ if { [file exists ./berkeley_db_svc] ||
+ [file exists ./berkeley_db_cxxsvc] ||
+ [file exists ./berkeley_db_javasvc] } {
+ lappend test_list {"RPC" "rpc"}
+ }
+
+ foreach pair $test_list {
+ set msg [lindex $pair 0]
+ set cmd [lindex $pair 1]
+ puts "Running $msg tests"
+ if [catch {exec $tclsh_path << \
+ "global one_test; set one_test $one_test; \
+ source $test_path/test.tcl; \
+ r $rflags $cmd $args" >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $cmd test: $res"
+ close $o
+ }
+ }
+
+ # If not actually running, no need to check for failure.
+ if { $run == 0 } {
+ return
+ }
+
+ set failed 0
+ set o [open ALL.OUT r]
+ while { [gets $o line] >= 0 } {
+ if { [regexp {^FAIL} $line] != 0 } {
+ set failed 1
+ }
+ }
+ close $o
+ set o [open ALL.OUT a]
+ if { $failed == 0 } {
+ puts "Regression Tests Succeeded"
+ puts $o "Regression Tests Succeeded"
+ } else {
+ puts "Regression Tests Failed; see ALL.OUT for log"
+ puts $o "Regression Tests Failed"
+ }
+
+ puts -nonewline "Test suite run completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts -nonewline $o "Test suite run completed at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ close $o
+}
+
+#
+# Run method tests in one environment. (As opposed to run_envmethod
+# which runs each test in its own, new environment.)
+#
+proc run_envmethod1 { method {display 0} {run 1} { outfile stdout } args } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global test_names
+ global parms
+ source ./include.tcl
+
+ if { $run == 1 } {
+ puts "run_envmethod1: $method $args"
+ }
+
+ set is_envmethod 1
+ if { $run == 1 } {
+ check_handles
+ env_cleanup $testdir
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ set env [eval {berkdb_env -create -cachesize {0 10000000 0}} \
+ {-pagesize 512 -mode 0644 -home $testdir}]
+ error_check_good env_open [is_valid_env $env] TRUE
+ append largs " -env $env "
+ }
+
+ if { $display } {
+ # The envmethod1 tests can't be split up, since they share
+ # an env.
+ puts $outfile "eval run_envmethod1 $method $args"
+ }
+
+ set stat [catch {
+ foreach test $test_names(test) {
+ if { [info exists parms($test)] != 1 } {
+ puts stderr "$test disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $run } {
+ puts $outfile "[timestamp]"
+ eval $test $method $parms($test) $largs
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $test: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set stat [catch {
+ foreach test $test_names(test) {
+ if { [info exists parms($test)] != 1 } {
+ puts stderr "$test disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $run } {
+ puts $outfile "[timestamp]"
+ eval $test $method $parms($test) $largs
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod1: $method $test: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ if { $run == 1 } {
+ error_check_good envclose [$env close] 0
+ check_handles $outfile
+ }
+ set is_envmethod 0
+
+}
+
+# Run the secondary index tests.
+proc sindex { {display 0} {run 1} {outfile stdout} {verbose 0} args } {
+ global test_names
+ global testdir
+ global verbose_check_secondaries
+ set verbose_check_secondaries $verbose
+ # Standard number of secondary indices to create if a single-element
+ # list of methods is passed into the secondary index tests.
+ global nsecondaries
+ set nsecondaries 2
+
+ # Run basic tests with a single secondary index and a small number
+ # of keys, then again with a larger number of keys. (Note that
+ # we can't go above 5000, since we use two items from our
+ # 10K-word list for each key/data pair.)
+ foreach n { 200 5000 } {
+ foreach pm { btree hash recno frecno queue queueext } {
+ foreach sm { dbtree dhash ddbtree ddhash btree hash } {
+ foreach test $test_names(si) {
+ if { $display } {
+ puts -nonewline $outfile \
+ "eval $test {\[list\
+ $pm $sm $sm\]} $n ;"
+ puts -nonewline $outfile \
+ " verify_dir \
+ $testdir \"\" 1; "
+ puts $outfile " salvage_dir \
+ $testdir "
+ }
+ if { $run } {
+ check_handles $outfile
+ eval $test \
+ {[list $pm $sm $sm]} $n
+ verify_dir $testdir "" 1
+ salvage_dir $testdir
+ }
+ }
+ }
+ }
+ }
+
+ # Run tests with 20 secondaries.
+ foreach pm { btree hash } {
+ set methlist [list $pm]
+ for { set j 1 } { $j <= 20 } {incr j} {
+ # XXX this should incorporate hash after #3726
+ if { $j % 2 == 0 } {
+ lappend methlist "dbtree"
+ } else {
+ lappend methlist "ddbtree"
+ }
+ }
+ foreach test $test_names(si) {
+ if { $display } {
+ puts "eval $test {\[list $methlist\]} 500"
+ }
+ if { $run } {
+ eval $test {$methlist} 500
+ }
+ }
+ }
+}
+
+# Run the secondary index join test.  (There's no point in running this
+# with both record lengths: the primary is unhappy for now with fixed-
+# length records (XXX), and we need unsorted dups in the secondaries.)
+proc sijoin { {display 0} {run 1} {outfile stdout} } {
+ foreach pm { btree hash recno } {
+ if { $display } {
+ foreach sm { btree hash } {
+ puts $outfile "eval sijointest\
+ {\[list $pm $sm $sm\]} 1000"
+ }
+ puts $outfile "eval sijointest\
+ {\[list $pm btree hash\]} 1000"
+ puts $outfile "eval sijointest\
+ {\[list $pm hash btree\]} 1000"
+ }
+ if { $run } {
+ foreach sm { btree hash } {
+ eval sijointest {[list $pm $sm $sm]} 1000
+ }
+ eval sijointest {[list $pm btree hash]} 1000
+ eval sijointest {[list $pm hash btree]} 1000
+ }
+ }
+}
+
+proc run { proc_suffix method {start 1} {stop 999} } {
+ global test_names
+
+ switch -exact -- $proc_suffix {
+ envmethod -
+ method -
+ recd -
+ repmethod -
+ reptest -
+ secenv -
+ secmethod {
+ # Run_recd runs the recd tests, all others
+ # run the "testxxx" tests.
+ if { $proc_suffix == "recd" } {
+ set testtype recd
+ } else {
+ set testtype test
+ }
+
+ for { set i $start } { $i <= $stop } { incr i } {
+ set name [format "%s%03d" $testtype $i]
+ # If a test number is missing, silently skip
+ # to next test; sparse numbering is allowed.
+ if { [lsearch -exact $test_names($testtype) \
+ $name] == -1 } {
+ continue
+ }
+ run_$proc_suffix $method $name
+ }
+ }
+ default {
+		puts "$proc_suffix is not set up to be used with run"
+ }
+ }
+}
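+
+# Example usage (illustrative): run test001 through test010 for btree,
+# silently skipping any numbers missing from testparams.tcl.
+#	run method btree 1 10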
+
+
+# We want to test all of 512b, 8Kb, and 64Kb pages, but chances are one
+# of these is the default pagesize. We don't want to run all the AM tests
+# twice, so figure out what the default page size is, then return the
+# other two.
+proc get_test_pagesizes { } {
+ # Create an in-memory database.
+ set db [berkdb_open -create -btree]
+ error_check_good gtp_create [is_valid_db $db] TRUE
+ set statret [$db stat]
+ set pgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set pgsz [lindex $pair 1]
+ }
+ }
+
+ error_check_good gtp_close [$db close] 0
+
+ error_check_bad gtp_pgsz $pgsz 0
+ switch $pgsz {
+ 512 { return {8192 65536} }
+ 8192 { return {512 65536} }
+ 65536 { return {512 8192} }
+ default { return {512 8192 65536} }
+ }
+ error_check_good NOTREACHED 0 1
+}
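+
+# For example, if the default page size turns out to be 8192, this
+# returns {512 65536} so the callers exercise the two non-default sizes.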
+
+proc run_timed_once { timedtest args } {
+ set start [timestamp -r]
+ set ret [catch {
+ eval $timedtest $args
+ flush stdout
+ flush stderr
+ } res]
+ set stop [timestamp -r]
+ if { $ret != 0 } {
+ global errorInfo
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_timed: $timedtest: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ return [expr $stop - $start]
+}
+
+proc run_timed { niter timedtest args } {
+ if { $niter < 1 } {
+ error "run_timed: Invalid number of iterations $niter"
+ }
+ set sum 0
+ set e {}
+ for { set i 1 } { $i <= $niter } { incr i } {
+ set elapsed [eval run_timed_once $timedtest $args]
+ lappend e $elapsed
+ set sum [expr $sum + $elapsed]
+ puts "Test $timedtest run $i completed: $elapsed seconds"
+ }
+ if { $niter > 1 } {
+ set avg [expr $sum / $niter]
+ puts "Average $timedtest time: $avg"
+ puts "Raw $timedtest data: $e"
+ }
+}
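+
+# Example usage (illustrative): time three runs of test001 over btree and
+# report the average elapsed seconds.
+#	run_timed 3 test001 btree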
diff --git a/db-4.8.30/test/test001.tcl b/db-4.8.30/test/test001.tcl
new file mode 100644
index 0000000..9303e9a
--- /dev/null
+++ b/db-4.8.30/test/test001.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test001
+# TEST Small keys/data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc test001 { method {nentries 10000} \
+ {start 0} {skip 0} {tnum "001"} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ # If we are not using an external env, then test setting
+ # the database cache size and using multiple caches.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ append args " -cachesize {0 1048576 3} "
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test$tnum: $method ($args) $nentries equal key/data pairs"
+ set did [open $dict]
+
+ # The "start" variable determines the record number to start
+ # with, if we're using record numbers. The "skip" variable
+ # determines the dictionary entry to start with.
+ # In normal use, skip will match start.
+
+ puts "\tTest$tnum: Starting at $start with dictionary entry $skip"
+ if { $skip != 0 } {
+ for { set count 0 } { $count < $skip } { incr count } {
+ gets $did str
+ }
+ }
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set temp $testdir/temp
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test001.check
+ }
+ puts "\tTest$tnum.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1 + $start]
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ if { $count % 50 == 0 } {
+ error_check_good txn_checkpoint($count) \
+ [$env txn_checkpoint] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+
+ # Test DB_GET_BOTH for success
+ set ret [$db get -get_both $key [pad_data $method $str]]
+ error_check_good \
+ getboth $ret [list [list $key [pad_data $method $str]]]
+
+ # Test DB_GET_BOTH for failure
+ set ret [$db get -get_both $key [pad_data $method BAD$str]]
+ error_check_good getbothBAD [llength $ret] 0
+
+ incr count
+ }
+ close $did
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+
+ puts "\tTest$tnum.b: dump file"
+ dump_file $db $txn $t1 $checkfunc
+ #
+ # dump_file should just have been "get" calls, so
+ # aborting a get should really be a no-op. Abort
+ # just for the fun of it.
+ if { $txnenv == 1 } {
+ error_check_good txn [$t abort] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ set j [expr $i + $start]
+ if { 0xffffffff > 0 && $j > 0xffffffff } {
+ set j [expr $j - 0x100000000]
+ }
+ if { $j == 0 } {
+ incr i
+ incr j
+ }
+ puts $oid $j
+ }
+ close $oid
+ } else {
+ filehead [expr $nentries + $start] $dict $t2 [expr $start + 1]
+ }
+ filesort $t2 $temp
+ file rename -force $temp $t2
+ filesort $t1 $t3
+
+ error_check_good Test$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest$tnum.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ eval open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next" $args
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test$tnum:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest$tnum.d: close, open, and dump file in reverse direction"
+ eval open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" $args
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test001; data should be the reverse of the key
+proc test001.check { key data } {
+ error_check_good "key/data mismatch" $data [reverse $key]
+}
+
+proc test001_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/db-4.8.30/test/test002.tcl b/db-4.8.30/test/test002.tcl
new file mode 100644
index 0000000..f5d5ee5
--- /dev/null
+++ b/db-4.8.30/test/test002.tcl
@@ -0,0 +1,160 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test002
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, retrieve all; compare output
+# TEST to original. Close file, reopen, do retrieve and re-verify.
+
+proc test002 { method {nentries 10000} args } {
+ global datastr
+ global pad_datastr
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test002.db
+ set env NULL
+ } else {
+ set testfile test002.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ # Create the database and open the dictionary
+ puts "Test002: $method ($args) $nentries key <fixed data> pairs"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+
+ if { [is_record_based $method] == 1 } {
+ append gflags "-recno"
+ }
+ set pad_datastr [pad_data $method $datastr]
+ puts "\tTest002.a: put/get loop"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+
+ error_check_good get $ret [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest002.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test002.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ }
+ filesort $t1 $t3
+
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest002.c: close, open, and dump file"
+ eval open_and_dump_file $testfile $env $t1 test002.check \
+ dump_file_direction "-first" "-next" $args
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest002.d: close, open, and dump file in reverse direction"
+ eval open_and_dump_file $testfile $env $t1 test002.check \
+ dump_file_direction "-last" "-prev" $args
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test002; every data value should be the fixed data string
+proc test002.check { key data } {
+ global pad_datastr
+ error_check_good "data mismatch for key $key" $data $pad_datastr
+}
diff --git a/db-4.8.30/test/test003.tcl b/db-4.8.30/test/test003.tcl
new file mode 100644
index 0000000..13216b8
--- /dev/null
+++ b/db-4.8.30/test/test003.tcl
@@ -0,0 +1,204 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test003
+# TEST Small keys/large data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Take the source files and dbtest executable and enter their names
+# TEST as the key with their contents as data. After all are entered,
+# TEST retrieve all; compare output to original. Close file, reopen, do
+# TEST retrieve and re-verify.
+proc test003 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if {[is_fixed_length $method] == 1} {
+ puts "Test003 skipping for method $method"
+ return
+ }
+ puts "Test003: $method ($args) filename=key filecontents=data pairs"
+
+ # Create the database and open the dictionary
+ set limit 0
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test003.db
+ set env NULL
+ } else {
+ set testfile test003.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ set limit 100
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set pflags ""
+ set gflags ""
+ set txn ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test003_recno.check
+ append gflags "-recno"
+ } else {
+ set checkfunc test003.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+ set len [llength $file_list]
+ puts "\tTest003.a: put/get loop $len entries"
+ set count 0
+ foreach f $file_list {
+ if { [string compare [file type $f] "file"] != 0 } {
+ continue
+ }
+
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ close $fid
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid [pad_data $method $data]
+ }
+ close $fid
+
+ error_check_good \
+ Test003:diff($f,$t4) [filecmp $f $t4] 0
+
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest003.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the entries in the
+ # current directory
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $count} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ if { [string compare [file type $f] "file"] != 0 } {
+ continue
+ }
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest003.c: close, open, and dump file"
+ eval open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_bin_file_direction "-first" "-next" $args
+
+ if { [is_record_based $method] == 1 } {
+ filesort $t1 $t3 -n
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest003.d: close, open, and dump file in reverse direction"
+ eval open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_bin_file_direction "-last" "-prev" $args
+
+ if { [is_record_based $method] == 1 } {
+ filesort $t1 $t3 -n
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test003; key should be file name; data should be contents
+proc test003.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Test003:datamismatch($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
+proc test003_recno.check { binfile tmpfile } {
+ global names
+ source ./include.tcl
+
+ set fname $names($binfile)
+ error_check_good key"$binfile"_exists [info exists names($binfile)] 1
+ error_check_good Test003:datamismatch($fname,$tmpfile) \
+ [filecmp $fname $tmpfile] 0
+}
diff --git a/db-4.8.30/test/test004.tcl b/db-4.8.30/test/test004.tcl
new file mode 100644
index 0000000..10b330f
--- /dev/null
+++ b/db-4.8.30/test/test004.tcl
@@ -0,0 +1,168 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test004
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Sequential (cursor) get/delete
+# TEST
+# TEST Check that cursor operations work. Create a database.
+# TEST Read through the database sequentially using cursors and
+# TEST delete each element.
+proc test004 { method {nentries 10000} {reopen "004"} {build_only 0} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set tnum test$reopen
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/$tnum.db
+ set env NULL
+ } else {
+ set testfile $tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts -nonewline "$tnum:\
+ $method ($args) $nentries delete small key; medium data pairs"
+ if {$reopen == "005"} {
+ puts "(with close)"
+ } else {
+ puts ""
+ }
+
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set kvals ""
+ puts "\tTest$reopen.a: put/get loop"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ lappend kvals $str
+ } else {
+ set key $str
+ }
+
+ set datastr [ make_data_str $str ]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good "$tnum:put" $ret \
+ [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+ if { $build_only == 1 } {
+ return $db
+ }
+ if { $reopen == "005" } {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+ puts "\tTest$reopen.b: get/delete loop"
+ # Now we will get each key from the DB and compare the results
+ # to the original, then delete it.
+ set outf [open $t1 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set c [eval {$db cursor} $txn]
+
+ set count 0
+ for {set d [$c get -first] } { [llength $d] != 0 } {
+ set d [$c get -next] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ if { [is_record_based $method] == 1 } {
+ set datastr \
+ [make_data_str [lindex $kvals [expr $k - 1]]]
+ } else {
+ set datastr [make_data_str $k]
+ }
+ error_check_good $tnum:$k $d2 [pad_data $method $datastr]
+ puts $outf $k
+ $c del
+ if { [is_record_based $method] == 1 && \
+ $do_renumber == 1 } {
+ set kvals [lreplace $kvals 0 0]
+ }
+ incr count
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ error_check_good test$reopen:keys_deleted $count $nentries
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ error_check_good Test$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+
+ error_check_good db_close [$db close] 0
+}
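
The double lindex calls in the get/delete loop above reflect the shape of the Tcl API's cursor results: each get returns a list of key/data pairs (a single pair for -first/-next), and each pair is itself a two-element list. A small illustration of the unpacking, using made-up values rather than real output:

    set d {{alpha {1 alpha-data}}}   ;# example of what $c get -first might yield
    set pair [lindex $d 0]           ;# -> alpha {1 alpha-data}
    set k [lindex $pair 0]           ;# -> alpha
    set v [lindex $pair 1]           ;# -> 1 alpha-data
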
diff --git a/db-4.8.30/test/test005.tcl b/db-4.8.30/test/test005.tcl
new file mode 100644
index 0000000..ab7ab30
--- /dev/null
+++ b/db-4.8.30/test/test005.tcl
@@ -0,0 +1,18 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test005
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Close, reopen
+# TEST Sequential (cursor) get/delete
+# TEST
+# TEST Check that cursor operations work. Create a database; close
+# TEST it and reopen it. Then read through the database sequentially
+# TEST using cursors and delete each element.
+proc test005 { method {nentries 10000} args } {
+ eval {test004 $method $nentries "005" 0} $args
+}
diff --git a/db-4.8.30/test/test006.tcl b/db-4.8.30/test/test006.tcl
new file mode 100644
index 0000000..e61cbad
--- /dev/null
+++ b/db-4.8.30/test/test006.tcl
@@ -0,0 +1,199 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test006
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Keyed delete and verify
+# TEST
+# TEST Keyed delete test.
+# TEST Create database.
+# TEST Go through database, deleting all entries by key.
+# TEST Then do the same for unsorted and sorted dups.
+proc test006 { method {nentries 10000} {reopen 0} {tnum "006"} \
+ {ndups 5} args } {
+
+ test006_body $method $nentries $reopen $tnum 1 "" "" $args
+
+ # For methods supporting dups, run the test with sorted and
+ # with unsorted dups.
+ if { [is_btree $method] == 1 || [is_hash $method] == 1 } {
+ foreach {sort flags} {unsorted -dup sorted "-dup -dupsort"} {
+ test006_body $method $nentries $reopen \
+ $tnum $ndups $sort $flags $args
+ }
+ }
+}
+
+proc test006_body { method {nentries 10000} {reopen 0} {tnum "006"} \
+ {ndups 5} sort flags {largs ""} } {
+ global is_je_test
+ source ./include.tcl
+
+ if { [is_compressed $largs] && $sort == "unsorted" } {
+ puts "Test$tnum skipping $sort duplicates for compression"
+ return
+ }
+
+ set do_renumber [is_rrecno $method]
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+
+ set tname Test$tnum
+ set dbname test$tnum
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $largs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set basename $testdir/$dbname
+ set env NULL
+ } else {
+ set basename $dbname
+ incr eindex
+ set env [lindex $largs $eindex]
+ if { $is_je_test && $sort == "unsorted" } {
+ puts "Test$tnum skipping $sort duplicates for JE"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append largs " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts -nonewline "$tname: $method ($flags $largs) "
+ puts -nonewline "$nentries equal small key; medium data pairs"
+ if {$reopen == 1} {
+ puts " (with close)"
+ } else {
+ puts ""
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ cleanup $testdir $env
+
+ # Here is the loop where we put and get each key/data pair.
+
+ set count 0
+ set testfile $basename$sort.db
+ set db [eval {berkdb_open -create \
+ -mode 0644} $largs $flags {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\t$tname.a: put/get loop"
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1 ]
+ } else {
+ set key $str
+ }
+
+ set str [make_data_str $str]
+ for { set j 1 } { $j <= $ndups } {incr j} {
+ set datastr $j$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn \
+ [$t commit] 0
+ }
+ }
+ incr count
+ }
+ close $did
+
+ # Close and reopen database, if testing reopen.
+
+ if { $reopen == 1 } {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $largs $flags {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original, then delete it.
+
+ puts "\t$tname.b: get/delete loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ set i 1
+ for { set ret [$dbc get -first] } \
+ { [string length $ret] != 0 } \
+ { set ret [$dbc get -next] } {
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ if { $i == 1 } {
+ set curkey $key
+ }
+ error_check_good seq_get:key:$i $key $curkey
+
+ if { $i == $ndups } {
+ set i 1
+ } else {
+ incr i
+ }
+
+ # Now delete the key
+ set ret [$dbc del]
+ error_check_good db_del:$key $ret 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\t$tname.c: verify empty file"
+ # Double check that file is now empty
+ set db [eval {berkdb_open} $largs $flags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+ set ret [$dbc get -first]
+ error_check_good get_on_empty [string length $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test007.tcl b/db-4.8.30/test/test007.tcl
new file mode 100644
index 0000000..2dfb46b
--- /dev/null
+++ b/db-4.8.30/test/test007.tcl
@@ -0,0 +1,18 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test007
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Close, reopen
+# TEST Keyed delete
+# TEST
+# TEST Check that delete operations work. Create a database; close
+# TEST the database and reopen it. Then issue a delete by key for each
+# TEST entry. (Test006 plus reopen)
+proc test007 { method {nentries 10000} {tnum "007"} {ndups 5} args} {
+ eval {test006 $method $nentries 1 $tnum $ndups} $args
+}
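
Test006 and test007 delete through a cursor, so each $dbc del removes only the duplicate the cursor is positioned on. The keyed form behaves differently: a single $db del on a key removes every duplicate stored under it. A short sketch of the keyed form, assuming a database opened with -dup; the file name is illustrative only:

    set db [berkdb_open -create -btree -dup dupdel.db]
    error_check_good dbopen [is_valid_db $db] TRUE
    foreach d {1 2 3} {
        # Plain puts on an unsorted -dup database append new duplicates.
        error_check_good put [$db put mykey $d:payload] 0
    }
    # One keyed delete removes all three duplicates at once.
    error_check_good del [$db del mykey] 0
    error_check_good gone [llength [$db get mykey]] 0
    error_check_good db_close [$db close] 0
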
diff --git a/db-4.8.30/test/test008.tcl b/db-4.8.30/test/test008.tcl
new file mode 100644
index 0000000..c6ca3ff
--- /dev/null
+++ b/db-4.8.30/test/test008.tcl
@@ -0,0 +1,199 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test008
+# TEST Small keys/large data
+# TEST Put/get per key
+# TEST Loop through keys by steps (which change)
+# TEST ... delete each key at step
+# TEST ... add each key back
+# TEST ... change step
+# TEST Confirm that overflow pages are getting reused
+# TEST
+# TEST Take the source files and dbtest executable and enter their names as
+# TEST the key with their contents as data. After all are entered, begin
+# TEST looping through the entries, deleting some pairs and then re-adding them.
+proc test008 { method {reopen "008"} {debug 0} args} {
+ source ./include.tcl
+
+ set tnum test$reopen
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Test$reopen skipping for method $method"
+ return
+ }
+
+ puts -nonewline "$tnum: $method filename=key filecontents=data pairs"
+ if {$reopen == "009"} {
+ puts "(with close)"
+ } else {
+ puts ""
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/$tnum.db
+ set env NULL
+ } else {
+ set testfile $tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+
+ set count 0
+ puts "\tTest$reopen.a: Initial put/get loop"
+ foreach f $file_list {
+ set names($count) $f
+ set key $f
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ put_file $db $txn $pflags $f
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ get_file $db $txn $gflags $f $t4
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good Test$reopen:diff($f,$t4) \
+ [filecmp $f $t4] 0
+
+ incr count
+ }
+
+ if {$reopen == "009"} {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ # Now we will step through the keys again (by increments) and
+ # delete all the entries, then re-insert them.
+
+ puts "\tTest$reopen.b: Delete re-add loop"
+ foreach i "1 2 4 8 16" {
+ for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db del} $txn {$names($ndx)}]
+ error_check_good db_del:$names($ndx) $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ put_file $db $txn $pflags $names($ndx)
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+ if {$reopen == "009"} {
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ # Now, reopen the file and make sure the key/data pairs look right.
+ puts "\tTest$reopen.c: Dump contents forward"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file $db $txn $t1 test008.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+
+ error_check_good Test$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in the reverse direction.
+ puts "\tTest$reopen.d: Dump contents backward"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file_direction $db $txn $t1 test008.check "-last" "-prev"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ filesort $t1 $t3
+
+ error_check_good Test$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ error_check_good close:$db [$db close] 0
+}
+
+proc test008.check { binfile tmpfile } {
+ global tnum
+ source ./include.tcl
+
+ error_check_good diff($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
diff --git a/db-4.8.30/test/test009.tcl b/db-4.8.30/test/test009.tcl
new file mode 100644
index 0000000..0c98d57
--- /dev/null
+++ b/db-4.8.30/test/test009.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test009
+# TEST Small keys/large data
+# TEST Same as test008; close and reopen database
+# TEST
+# TEST Check that we reuse overflow pages. Create database with lots of
+# TEST big key/data pairs. Go through and delete and add keys back
+# TEST randomly. Then close the DB and make sure that we have everything
+# TEST we think we should.
+proc test009 { method args} {
+ eval {test008 $method "009" 0} $args
+}
diff --git a/db-4.8.30/test/test010.tcl b/db-4.8.30/test/test010.tcl
new file mode 100644
index 0000000..ed1f2c2
--- /dev/null
+++ b/db-4.8.30/test/test010.tcl
@@ -0,0 +1,181 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test010
+# TEST Duplicate test
+# TEST Small key/data pairs.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; add duplicate records for each.
+# TEST After all are entered, retrieve all; verify output.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST This does not work for recno
+proc test010 { method {nentries 10000} {ndups 5} {tnum "010"} args } {
+ source ./include.tcl
+
+ set omethod $method
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test$tnum skipping for btree with compression."
+ return
+ }
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test$tnum skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test$tnum: $method ($args) $nentries \
+ small $ndups dup key/data pairs"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ for {set ret [$dbc get "-set" $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ if {[llength $ret] == 0} {
+ break
+ }
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ error_check_good "Test$tnum:get" $d $str
+ set id [ id_of $datastr ]
+ error_check_good "Test$tnum:dup#" $id $x
+ incr x
+ }
+ error_check_good "Test$tnum:ndups:$str" [expr $x - 1] $ndups
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest$tnum.a: Checking file for correct duplicates"
+ set dlist ""
+ for { set i 1 } { $i <= $ndups } {incr i} {
+ lappend dlist $i
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest$tnum.b: Checking file for correct duplicates after close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ filesort $t1 $t3
+ error_check_good Test$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+}
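
The duplicates above are stored as "$i:$str", and the verification pulls the two halves back out with the suite helpers id_of and data_of (defined elsewhere in the suite, not in this file). For this data format they act as if they split on the first colon; a rough stand-in, offered only as an assumption about their behavior:

    # Assumed behavior only; the real helpers live elsewhere in the test suite.
    proc id_of_sketch { datastr } {
        return [lindex [split $datastr :] 0]
    }
    proc data_of_sketch { datastr } {
        # Rejoin the remainder in case the payload itself contains colons.
        return [join [lrange [split $datastr :] 1 end] :]
    }
    # id_of_sketch 3:apple   -> 3
    # data_of_sketch 3:apple -> apple
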
diff --git a/db-4.8.30/test/test011.tcl b/db-4.8.30/test/test011.tcl
new file mode 100644
index 0000000..7e41181
--- /dev/null
+++ b/db-4.8.30/test/test011.tcl
@@ -0,0 +1,475 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test011
+# TEST Duplicate test
+# TEST Small key/data pairs.
+# TEST Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER.
+# TEST To test off-page duplicates, run with small pagesize.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; add duplicate records for each.
+# TEST Then do some key_first/key_last, add_before, and add_after operations.
+# TEST This does not work for recno
+# TEST
+# TEST To test if dups work when they fall off the main page, run this with
+# TEST a very tiny page size.
+proc test011 { method {nentries 10000} {ndups 5} {tnum "011"} args } {
+ global dlist
+ global rand_init
+ source ./include.tcl
+
+ set dlist ""
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test$tnum skipping for btree with compression."
+ return
+ }
+
+ if { [is_rbtree $method] == 1 } {
+ puts "Test$tnum skipping for method $method"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ test011_recno $method $nentries $tnum $args
+ return
+ }
+ if {$ndups < 5} {
+ set ndups 5
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+
+ puts -nonewline "Test$tnum: $method $nentries small $ndups dup "
+ puts "key/data pairs, cursor ops"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create \
+ -mode 0644} [concat $args "-dup"] {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ # We will add dups with values 1, 3, ... $ndups. Then we'll add
+ # 0 and $ndups+1 using keyfirst/keylast. We'll add 2 and 4 using
+ # add before and add after.
+ puts "\tTest$tnum.a: put and get duplicate keys."
+ set i ""
+ for { set i 1 } { $i <= $ndups } { incr i 2 } {
+ lappend dlist $i
+ }
+ set maxodd $i
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i 2 } {
+ set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$str $datastr}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ for {set ret [$dbc get "-set" $str ]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ if {[llength $ret] == 0} {
+ break
+ }
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+
+ error_check_good Test$tnum:put $d $str
+ set id [ id_of $datastr ]
+ error_check_good Test$tnum:dup# $id $x
+ incr x 2
+ }
+ error_check_good Test$tnum:numdups $x $maxodd
+ error_check_good curs_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest$tnum.b: \
+ traverse entire file checking duplicates before close."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest$tnum.c: \
+ traverse entire file checking duplicates after close."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ filesort $t1 $t3
+ error_check_good Test$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest$tnum.d: Testing key_first functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-keyfirst" 0 0
+ set dlist [linsert $dlist 0 0]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest$tnum.e: Testing key_last functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-keylast" [expr $maxodd - 1] 0
+ lappend dlist [expr $maxodd - 1]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest$tnum.f: Testing add_before functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-before" 2 3
+ set dlist [linsert $dlist 2 2]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest$tnum.g: Testing add_after functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-after" 4 4
+ set dlist [linsert $dlist 4 4]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+}
+
+proc add_dup {db txn nentries flag dataval iter} {
+ source ./include.tcl
+
+ set dbc [eval {$db cursor} $txn]
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set datastr $dataval:$str
+ set ret [$dbc get "-set" $str]
+ error_check_bad "cget(SET)" [is_substr $ret Error] 1
+ for { set i 1 } { $i < $iter } { incr i } {
+ set ret [$dbc get "-next"]
+ error_check_bad "cget(NEXT)" [is_substr $ret Error] 1
+ }
+
+ if { [string compare $flag "-before"] == 0 ||
+ [string compare $flag "-after"] == 0 } {
+ set ret [$dbc put $flag $datastr]
+ } else {
+ set ret [$dbc put $flag $str $datastr]
+ }
+ error_check_good "$dbc put $flag" $ret 0
+ incr count
+ }
+ close $did
+ $dbc close
+}
+
+proc test011_recno { method {nentries 10000} {tnum "011"} largs } {
+ global dlist
+ source ./include.tcl
+
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+ set renum [is_rrecno $method]
+
+ puts "Test$tnum: \
+ $method ($largs) $nentries test cursor insert functionality"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $largs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $largs $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append largs " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ if {$renum == 1} {
+ append largs " -renumber"
+ }
+ set db [eval {berkdb_open \
+ -create -mode 0644} $largs {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # The basic structure of the test is that we pick a random key
+ # in the database, overwrite it, and then add items before and after it. The
+ # trickiness is that with RECNO, these are not duplicates, they
+ # are creating new keys. Therefore, every time we do this, the
+ # keys assigned to other values change. For this reason, we'll
+ # keep the database in tcl as a list and insert properly into
+ # it to verify that the right thing is happening. If we do not
+ # have renumber set, then the BEFORE and AFTER calls should fail.
+
+ # Seed the database with an initial record
+ gets $did str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {1 [chop_data $method $str]}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good put $ret 0
+ set count 1
+
+ set dlist "NULL $str"
+
+ # Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ puts "\tTest$tnum.a: put and get entries"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Pick a random key
+ set key [berkdb random_int 1 $count]
+ set ret [$dbc get -set $key]
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good cget:SET:key $k $key
+ error_check_good \
+ cget:SET $d [pad_data $method [lindex $dlist $key]]
+
+ # Current
+ set ret [$dbc put -current [chop_data $method $str]]
+ error_check_good cput:$key $ret 0
+ set dlist [lreplace $dlist $key $key [pad_data $method $str]]
+
+ # Before
+ if { [gets $did str] == -1 } {
+ continue;
+ }
+
+ if { $renum == 1 } {
+ set ret [$dbc put \
+ -before [chop_data $method $str]]
+ error_check_good cput:$key:BEFORE $ret $key
+ set dlist [linsert $dlist $key $str]
+ incr count
+
+ # After
+ if { [gets $did str] == -1 } {
+ continue;
+ }
+ set ret [$dbc put \
+ -after [chop_data $method $str]]
+ error_check_good cput:$key:AFTER $ret [expr $key + 1]
+ set dlist [linsert $dlist [expr $key + 1] $str]
+ incr count
+ }
+
+ # Now verify that the keys are in the right place
+ set i 0
+ for {set ret [$dbc get "-set" $key]} \
+ {[string length $ret] != 0 && $i < 3} \
+ {set ret [$dbc get "-next"] } {
+ set check_key [expr $key + $i]
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good cget:$key:loop $k $check_key
+
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good cget:data $d \
+ [pad_data $method [lindex $dlist $check_key]]
+ incr i
+ }
+ }
+ close $did
+ error_check_good cclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Create check key file.
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $count} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+
+ puts "\tTest$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test011_check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good Test$tnum:diff($t2,$t1) \
+ [filecmp $t2 $t1] 0
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tnum.c: close, open, and dump file"
+ eval open_and_dump_file $testfile $env $t1 test011_check \
+ dump_file_direction "-first" "-next" $largs
+ error_check_good Test$tnum:diff($t2,$t1) \
+ [filecmp $t2 $t1] 0
+
+ puts "\tTest$tnum.d: close, open, and dump file in reverse direction"
+ eval open_and_dump_file $testfile $env $t1 test011_check \
+ dump_file_direction "-last" "-prev" $largs
+
+ filesort $t1 $t3 -n
+ error_check_good Test$tnum:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+}
+
+proc test011_check { key data } {
+ global dlist
+
+ error_check_good "get key $key" $data [lindex $dlist $key]
+}
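
The long comment in test011_recno explains why the Tcl-side list has to shadow the database: with -renumber, a cursor put with -before or -after shifts the record numbers of every record that follows, which is exactly what linsert does to a list. A tiny worked example of the bookkeeping, using a NULL placeholder at index 0 so list indices line up with record numbers:

    set dlist {NULL aaa bbb ccc}      ;# record 1 -> aaa, 2 -> bbb, 3 -> ccc
    # With the cursor on record 2, "put -before xxx" makes xxx record 2 and
    # pushes bbb and ccc to records 3 and 4; the list mirrors that with:
    set dlist [linsert $dlist 2 xxx]  ;# NULL aaa xxx bbb ccc
    # A following "put -after yyy" from the new record 2 creates record 3:
    set dlist [linsert $dlist 3 yyy]  ;# NULL aaa xxx yyy bbb ccc
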
diff --git a/db-4.8.30/test/test012.tcl b/db-4.8.30/test/test012.tcl
new file mode 100644
index 0000000..44e6c71
--- /dev/null
+++ b/db-4.8.30/test/test012.tcl
@@ -0,0 +1,138 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test012
+# TEST Large keys/small data
+# TEST Same as test003 except use big keys (source files and
+# TEST executables) and small data (the file/executable names).
+# TEST
+# TEST Take the source files and dbtest executable and enter their contents
+# TEST as the key with their names as data. After all are entered, retrieve
+# TEST all; compare output to original. Close file, reopen, do retrieve and
+# TEST re-verify.
+proc test012 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Test012 skipping for method $method"
+ return
+ }
+
+ puts "Test012: $method ($args) filename=data filecontents=key pairs"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test012.db
+ set env NULL
+ } else {
+ set testfile test012.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+
+ puts "\tTest012.a: put/get loop"
+ set count 0
+ foreach f $file_list {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ put_file_as_key $db $txn $pflags $f
+
+ set kd [get_file_as_key $db $txn $gflags $f]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest012.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_binkey_file $db $txn $t1 test012.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the data to see if they match the .o and dbtest files
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest012.c: close, open, and dump file"
+ eval open_and_dump_file $testfile $env $t1 test012.check \
+ dump_binkey_file_direction "-first" "-next" $args
+
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in the reverse direction.
+ puts "\tTest012.d: close, open, and dump file in reverse direction"
+ eval open_and_dump_file $testfile $env $t1 test012.check \
+ dump_binkey_file_direction "-last" "-prev" $args
+
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test012; key should be the file contents; data should be the file name
+proc test012.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Test012:diff($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
diff --git a/db-4.8.30/test/test013.tcl b/db-4.8.30/test/test013.tcl
new file mode 100644
index 0000000..aae2a9b
--- /dev/null
+++ b/db-4.8.30/test/test013.tcl
@@ -0,0 +1,239 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test013
+# TEST Partial put test
+# TEST Overwrite entire records using partial puts.
+# TEST Make sure that NOOVERWRITE flag works.
+# TEST
+# TEST 1. Insert 10000 keys and retrieve them (equal key/data pairs).
+# TEST 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error).
+# TEST 3. Actually overwrite each one with its datum reversed.
+# TEST
+# TEST No partial testing here.
+proc test013 { method {nentries 10000} args } {
+ global errorCode
+ global errorInfo
+ global fixed_len
+
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test013.db
+ set env NULL
+ } else {
+ set testfile test013.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test013: $method ($args) $nentries equal key/data pairs, put test"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test013_recno.check
+ append gflags " -recno"
+ global kvals
+ } else {
+ set checkfunc test013.check
+ }
+ puts "\tTest013.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will try to overwrite each datum, but set the
+ # NOOVERWRITE flag.
+ puts "\tTest013.b: overwrite values with NOOVERWRITE flag."
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $key [chop_data $method $str]}]
+ error_check_good put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Value should be unchanged.
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will replace each item with its datum capitalized.
+ puts "\tTest013.c: overwrite values with capitalized datum"
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set rstr [string toupper $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $rstr]}]
+ error_check_good put $r 0
+
+ # Value should be changed.
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $rstr]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest013.d: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ puts "\tTest013.e: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ eval open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next" $args
+
+ if { [is_record_based $method] == 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest013.f: close, open, and dump file in reverse direction"
+ eval open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" $args
+
+ if { [is_record_based $method] == 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test013; data should be the key, uppercased
+proc test013.check { key data } {
+ error_check_good \
+ "key/data mismatch for $key" $data [string toupper $key]
+}
+
+proc test013_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good \
+ "data mismatch for $key" $data [string toupper $kvals($key)]
+}
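
Step b of test013 relies on the Tcl interface reporting DB_KEYEXIST as a return string, rather than returning 0 or raising an error, when -nooverwrite hits an existing key. A minimal sketch of that behavior; the file name is illustrative only:

    set db [berkdb_open -create -btree nooverwrite.db]
    error_check_good dbopen [is_valid_db $db] TRUE
    error_check_good first_put [$db put alpha one] 0
    # The second put refuses to replace the existing record.
    set ret [$db put -nooverwrite alpha two]
    error_check_good keyexist [is_substr $ret "DB_KEYEXIST"] 1
    # The stored value is untouched.
    error_check_good unchanged [lindex [lindex [$db get alpha] 0] 1] one
    error_check_good db_close [$db close] 0
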
diff --git a/db-4.8.30/test/test014.tcl b/db-4.8.30/test/test014.tcl
new file mode 100644
index 0000000..5482376
--- /dev/null
+++ b/db-4.8.30/test/test014.tcl
@@ -0,0 +1,252 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test014
+# TEST Exercise partial puts on short data
+# TEST Run 5 combinations of numbers of characters to replace,
+# TEST and number of times to increase the size by.
+# TEST
+# TEST Partial put test, small data, replacing with same size. The data set
+# TEST consists of the first nentries of the dictionary. We will insert them
+# TEST (and retrieve them) as we do in test 1 (equal key/data pairs). Then
+# TEST we'll try to perform partial puts of some characters at the beginning,
+# TEST some at the end, and some in the middle.
+proc test014 { method {nentries 10000} args } {
+ set fixed 0
+ set args [convert_args $method $args]
+
+ if { [is_fixed_length $method] == 1 } {
+ set fixed 1
+ }
+
+ puts "Test014: $method ($args) $nentries equal key/data pairs, put test"
+
+ # flagp indicates whether this is a postpend or a
+ # normal partial put
+ set flagp 0
+
+ eval {test014_body $method $flagp 1 1 $nentries} $args
+ eval {test014_body $method $flagp 1 4 $nentries} $args
+ eval {test014_body $method $flagp 2 4 $nentries} $args
+ eval {test014_body $method $flagp 1 128 $nentries} $args
+ eval {test014_body $method $flagp 2 16 $nentries} $args
+ if { $fixed == 0 } {
+ eval {test014_body $method $flagp 0 1 $nentries} $args
+ eval {test014_body $method $flagp 0 4 $nentries} $args
+ eval {test014_body $method $flagp 0 128 $nentries} $args
+
+ # POST-PENDS :
+ # partial put data after the end of the existing record
+ # chars: the number of empty spaces that will be padded with nulls
+ # increase: the length of the string to be appended (after the pad)
+ #
+ set flagp 1
+ eval {test014_body $method $flagp 1 1 $nentries} $args
+ eval {test014_body $method $flagp 4 1 $nentries} $args
+ eval {test014_body $method $flagp 128 1 $nentries} $args
+ eval {test014_body $method $flagp 1 4 $nentries} $args
+ eval {test014_body $method $flagp 1 128 $nentries} $args
+ }
+ puts "Test014 complete."
+}
+
+proc test014_body { method flagp chars increase {nentries 10000} args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+
+ if { [is_fixed_length $method] == 1 && $chars != $increase } {
+ puts "Test014: $method: skipping replace\
+ $chars chars with string $increase times larger."
+ return
+ }
+
+ if { $flagp == 1} {
+ puts "Test014: Postpending string of len $increase with \
+ gap $chars."
+ } else {
+ puts "Test014: Replace $chars chars with string \
+ $increase times larger"
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test014.db
+ set env NULL
+ } else {
+ set testfile test014.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set gflags ""
+ set pflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest014.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ # We will do the initial put and then three Partial Puts
+ # for the beginning, middle and end of the string.
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $flagp == 1 } {
+ # this is for postpend only
+ global dvals
+
+ # initial put
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $str}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbput $ret 0
+
+ set offset [string length $str]
+
+ # increase is the actual number of new bytes
+ # to be postpended (besides the null padding)
+ set data [repeat "P" $increase]
+
+ # chars is the amount of padding in between
+ # the old data and the new
+ set len [expr $offset + $chars + $increase]
+ set dvals($key) [binary format \
+ a[set offset]x[set chars]a[set increase] \
+ $str $data]
+ set offset [expr $offset + $chars]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put -partial [list $offset 0]} \
+ $txn {$key $data}]
+ error_check_good dbput:post $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ } else {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ partial_put $method $db $txn \
+ $gflags $key $str $chars $increase
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest014.b: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test014.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ puts "\tTest014.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ eval open_and_dump_file $testfile $env \
+ $t1 test014.check dump_file_direction "-first" "-next" $args
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest014.d: close, open, and dump file in reverse direction"
+ eval open_and_dump_file $testfile $env $t1 \
+ test014.check dump_file_direction "-last" "-prev" $args
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test014; expected data for each key is saved in dvals
+proc test014.check { key data } {
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ error_check_good "data mismatch for key $key" $data $dvals($key)
+}
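
The postpend branch of test014 builds its expected value with binary format, where each a field is a string padded (or truncated) to the given width and x emits literal null bytes; that is how the gap between the old record and the appended bytes is represented. The same format string with concrete, purely illustrative sizes:

    # "hello" (5 bytes), a 3-byte null gap, then "PP" (2 bytes): 10 bytes total.
    set expected [binary format a5x3a2 "hello" "PP"]
    puts [string length $expected]    ;# prints 10
    # $expected is "hello\0\0\0PP"; the x3 field supplies the null padding,
    # matching the offset/chars/increase layout stored in dvals above.
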
diff --git a/db-4.8.30/test/test015.tcl b/db-4.8.30/test/test015.tcl
new file mode 100644
index 0000000..57c1b6e
--- /dev/null
+++ b/db-4.8.30/test/test015.tcl
@@ -0,0 +1,284 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test015
+# TEST Partial put test
+# TEST Partial put test where the key does not initially exist.
+proc test015 { method {nentries 7500} { start 0 } args } {
+ global fixed_len testdir
+ set orig_tdir $testdir
+
+ set low_range 50
+ set mid_range 100
+ set high_range 1000
+
+ if { [is_fixed_length $method] } {
+ set low_range [expr $fixed_len/2 - 2]
+ set mid_range [expr $fixed_len/2]
+ set high_range $fixed_len
+ }
+
+ set t_table {
+ { 1 { 1 1 1 } }
+ { 2 { 1 1 5 } }
+ { 3 { 1 1 $low_range } }
+ { 4 { 1 $mid_range 1 } }
+ { 5 { $mid_range $high_range 5 } }
+ { 6 { 1 $mid_range $low_range } }
+ }
+
+ puts "Test015: \
+ $method ($args) $nentries equal key/data pairs, partial put test"
+ test015_init
+ if { $start == 0 } {
+ set start { 1 2 3 4 5 6 }
+ }
+ if { [is_partitioned $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+ foreach entry $t_table {
+ set this [lindex $entry 0]
+ if { [lsearch $start $this] == -1 } {
+ continue
+ }
+ puts -nonewline "$this: "
+ eval [concat test015_body $method [lindex $entry 1] \
+ $nentries $args]
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+
+ error_check_good verify \
+ [verify_dir $testdir "\tTest015.e:" 0 0 $nodump] 0
+ }
+ set testdir $orig_tdir
+}
+
+proc test015_init { } {
+ global rand_init
+
+ berkdb srand $rand_init
+}
+
+proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
+ global dvals
+ global fixed_len
+ global testdir
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set orig_tdir $testdir
+ set checkfunc test015.check
+
+ if { [is_fixed_length $method] && \
+ [string compare $omethod "-recno"] == 0} {
+ # is fixed recno method
+ set checkfunc test015.check
+ }
+
+ puts "Put $rcount strings random offsets between $off_low and $off_hi"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test015.db
+ set env NULL
+ } else {
+ set testfile test015.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries > 5000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set retdir $testdir
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ puts "\tTest015.a: put/get loop for $nentries entries"
+
+ # Here is the loop where we put and get each key/data pair
+ # Each put is a partial put of a record that does not exist.
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ if { [string length $str] > $fixed_len } {
+ continue
+ }
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ if { 0 } {
+ set data [replicate $str $rcount]
+ set off [ berkdb random_int $off_low $off_hi ]
+ set offn [expr $off + 1]
+ if { [is_fixed_length $method] && \
+ [expr [string length $data] + $off] >= $fixed_len} {
+ set data [string range $data 0 [expr $fixed_len-$offn]]
+ }
+ set dvals($key) [partial_shift $data $off right]
+ } else {
+ set data [chop_data $method [replicate $str $rcount]]
+
+ # This is a hack. In DB we will store the records with
+ # some padding, but these will get lost if we just return
+ # them in TCL. As a result, we're going to have to hack
+ # get to check for 0 padding and return a list consisting
+ # of the number of 0's and the actual data.
+ set off [ berkdb random_int $off_low $off_hi ]
+
+ # There is no string concatenation function in Tcl
+ # (although there is one in TclX), so we have to resort
+ # to this hack. Ugh.
+ set slen [string length $data]
+ if {[is_fixed_length $method] && \
+ $slen > $fixed_len - $off} {
+ set slen [expr $fixed_len - $off]
+ }
+ set a "a"
+ set dvals($key) [pad_data \
+ $method [eval "binary format x$off$a$slen" {$data}]]
+ }
+ if {[is_fixed_length $method] && \
+ [string length $data] > ($fixed_len - $off)} {
+ set slen [expr $fixed_len - $off]
+ set data [eval "binary format a$slen" {$data}]
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn \
+ {-partial [list $off [string length $data]] $key $data}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest015.b: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest015.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ eval open_and_dump_file $testfile $env $t1 \
+ $checkfunc dump_file_direction "-first" "-next" $args
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest015.d: close, open, and dump file in reverse direction"
+ eval open_and_dump_file $testfile $env $t1 \
+ $checkfunc dump_file_direction "-last" "-prev" $args
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ unset dvals
+ set testdir $orig_tdir
+}
+
+ # Check function for test015; data must match the padded value saved in dvals
+proc test015.check { key data } {
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ binary scan $data "c[string length $data]" a
+ binary scan $dvals($key) "c[string length $dvals($key)]" b
+ error_check_good "mismatch on padding for key $key" $a $b
+}
+
+proc test015.fixed.check { key data } {
+ global dvals
+ global fixed_len
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ if { [string length $data] > $fixed_len } {
+ error_check_bad \
+ "data length:[string length $data] \
+ for fixed:$fixed_len" 1 1
+ }
+ puts "$data : $dvals($key)"
+ error_check_good compare_data($data,$dvals($key)) \
+ $dvals($key) $data
+}
diff --git a/db-4.8.30/test/test016.tcl b/db-4.8.30/test/test016.tcl
new file mode 100644
index 0000000..4f64964
--- /dev/null
+++ b/db-4.8.30/test/test016.tcl
@@ -0,0 +1,206 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test016
+# TEST Partial put test
+# TEST Partial put where the datum gets shorter as a result of the put.
+# TEST
+# TEST Partial put test where partial puts make the record smaller.
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, go back and do partial puts,
+# TEST replacing a random-length string with the key value.
+# TEST Then verify.
+
+proc test016 { method {nentries 10000} args } {
+ global datastr
+ global dvals
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_fixed_length $method] == 1 } {
+ puts "Test016: skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test016.db
+ set env NULL
+ } else {
+ set testfile test016.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test016: $method ($args) $nentries partial put shorten"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest016.a: put/get loop"
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $datastr]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Next we will do a partial put replacement, making the data
+ # shorter
+ puts "\tTest016.b: partial put loop"
+ set did [open $dict]
+ set count 0
+ set len [string length $datastr]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ set repl_len [berkdb random_int [string length $key] $len]
+ set repl_off [berkdb random_int 0 [expr $len - $repl_len] ]
+ set s1 [string range $datastr 0 [ expr $repl_off - 1] ]
+ set s2 [string toupper $key]
+ set s3 [string range $datastr [expr $repl_off + $repl_len] end ]
+ set dvals($key) [pad_data $method $s1$s2$s3]
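+ # For example, if datastr were "abcdefghij" with repl_off 2 and
+ # repl_len 3: s1 is "ab", s3 is "fghij", and the expected record
+ # is "ab" + [string toupper $key] + "fghij". Because repl_len is
+ # at least the key length, the record never grows.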
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {-partial \
+ [list $repl_off $repl_len] $key [chop_data $method $s2]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ put $ret [list [list $key [pad_data $method $s1$s2$s3]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest016.c: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test016.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest016.d: close, open, and dump file"
+ eval open_and_dump_file $testfile $env $t1 test016.check \
+ dump_file_direction "-first" "-next" $args
+
+ if { [ is_record_based $method ] == 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest016.e: close, open, and dump file in reverse direction"
+ eval open_and_dump_file $testfile $env $t1 test016.check \
+ dump_file_direction "-last" "-prev" $args
+
+ if { [ is_record_based $method ] == 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test016; data should be whatever is set in dvals
+proc test016.check { key data } {
+ global datastr
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ error_check_good "data mismatch for key $key" $data $dvals($key)
+}
diff --git a/db-4.8.30/test/test017.tcl b/db-4.8.30/test/test017.tcl
new file mode 100644
index 0000000..00961a2
--- /dev/null
+++ b/db-4.8.30/test/test017.tcl
@@ -0,0 +1,321 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test017
+# TEST Basic offpage duplicate test.
+# TEST
+# TEST Run duplicates with small page size so that we test off page duplicates.
+# TEST Then after we have an off-page database, test with overflow pages too.
+proc test017 { method {contents 0} {ndups 19} {tnum "017"} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test$tnum skipping for btree with compression."
+ return
+ }
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test$tnum skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ incr pgindex
+ if { [lindex $args $pgindex] > 8192 } {
+ puts "Test$tnum: Skipping for large pagesizes"
+ return
+ }
+ }
+
+ # Create the database and open the dictionary
+ set limit 0
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ set limit 100
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ set file_list [get_file_list 1]
+ if { $txnenv == 1 } {
+ if { [llength $file_list] > $limit } {
+ set file_list [lrange $file_list 0 $limit]
+ }
+ set flen [llength $file_list]
+ reduce_dups flen ndups
+ }
+ puts "Test$tnum: $method ($args) Off page duplicate tests\
+ with $ndups duplicates"
+
+ set ovfl ""
+ # Here is the loop where we put and get each key/data pair
+ puts -nonewline "\tTest$tnum.a: Creating duplicates with "
+ if { $contents != 0 } {
+ puts "file contents as key/data"
+ } else {
+ puts "file name as key/data"
+ }
+ foreach f $file_list {
+ if { $contents != 0 } {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ #
+ # Prepend file name to guarantee uniqueness
+ set filecont [read $fid]
+ set str $f:$filecont
+ close $fid
+ } else {
+ set str $f
+ }
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ #
+ # Save 10% of the files for the overflow test
+ #
+ if { $contents == 0 && [expr $count % 10] == 0 } {
+ lappend ovfl $f
+ }
+ # Now retrieve all the data items stored under this key
+ set ret [$db get $str]
+ error_check_bad $f:dbget_dups [llength $ret] 0
+ error_check_good $f:dbget_dups1 [llength $ret] $ndups
+ set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ for {set ret [$dbc get "-set" $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ if {[string length $d] == 0} {
+ break
+ }
+ error_check_good "Test$tnum:get" $d $str
+ set id [ id_of $datastr ]
+ error_check_good "Test$tnum:$f:dup#" $id $x
+ incr x
+ }
+ error_check_good "Test$tnum:ndups:$str" [expr $x - 1] $ndups
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest$tnum.b: Checking file for correct duplicates"
+ set dlist ""
+ for { set i 1 } { $i <= $ndups } {incr i} {
+ lappend dlist $i
+ }
+ set oid [open $t2.tmp w]
+ set o1id [open $t4.tmp w]
+ foreach f $file_list {
+ for {set i 1} {$i <= $ndups} {incr i} {
+ puts $o1id $f
+ }
+ puts $oid $f
+ }
+ close $oid
+ close $o1id
+ filesort $t2.tmp $t2
+ filesort $t4.tmp $t4
+ fileremove $t2.tmp
+ fileremove $t4.tmp
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if {$contents == 0} {
+ filesort $t1 $t3
+
+ error_check_good Test$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now compare the keys to see if they match the file names
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test017.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ filesort $t1 $t3
+
+ error_check_good Test$tnum:diff($t3,$t4) [filecmp $t3 $t4] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest$tnum.c: Checking file for correct duplicates after close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if {$contents == 0} {
+ # Now compare the keys to see if they match the filenames
+ filesort $t1 $t3
+ error_check_good Test$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tnum.d: Verify off page duplicates and overflow status"
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set stat [$db stat]
+ if { [is_btree $method] } {
+ error_check_bad stat:offpage \
+ [is_substr $stat "{{Internal pages} 0}"] 1
+ }
+ if {$contents == 0} {
+ # This check doesn't work for hash, since the overflow
+ # page count includes extra bucket pages as well as true
+ # P_OVERFLOW pages.
+ if { [is_hash $method] == 0 } {
+ error_check_good overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ } else {
+ if { [is_hash $method] } {
+ error_check_bad overflow \
+ [is_substr $stat "{{Number of big pages} 0}"] 1
+ } else {
+ error_check_bad overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ }
+
+ #
+ # If doing overflow test, do that now. Else we are done.
+ # Add overflow pages by adding a large entry to a duplicate.
+ #
+ if { [llength $ovfl] == 0} {
+ error_check_good db_close [$db close] 0
+ return
+ }
+
+ puts "\tTest$tnum.e: Add overflow duplicate entries"
+ set ovfldup [expr $ndups + 1]
+ foreach f $ovfl {
+ #
+ # This is just like put_file, but prepends the dup number
+ #
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set fdata [read $fid]
+ close $fid
+ set data $ovfldup:$fdata:$fdata:$fdata:$fdata
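+ # The value built above is several times the file contents, so it
+ # is too large to fit on a database page and DB stores it on
+ # overflow pages, which the stat checks below should detect.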
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$f $data}]
+ error_check_good ovfl_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest$tnum.f: Verify overflow duplicate entries"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist $ovfldup
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ filesort $t1 $t3
+ error_check_good Test$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ set stat [$db stat]
+ if { [is_hash [$db get_type]] } {
+ error_check_bad overflow1_hash [is_substr $stat \
+ "{{Number of big pages} 0}"] 1
+ } else {
+ error_check_bad \
+ overflow1 [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# Check function; verify data contains key
+proc test017.check { key data } {
+ error_check_good "data mismatch for key $key" $key [data_of $data]
+}
diff --git a/db-4.8.30/test/test018.tcl b/db-4.8.30/test/test018.tcl
new file mode 100644
index 0000000..ab0226d
--- /dev/null
+++ b/db-4.8.30/test/test018.tcl
@@ -0,0 +1,20 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test018
+# TEST Offpage duplicate test
+# TEST Key_{first,last,before,after} offpage duplicates.
+# TEST Run duplicates with small page size so that we test off page
+# TEST duplicates.
+proc test018 { method {nentries 10000} args} {
+ puts "Test018: Off page duplicate tests"
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test018: Skipping for specific pagesizes"
+ return
+ }
+ eval {test011 $method $nentries 19 "018" -pagesize 512} $args
+}
diff --git a/db-4.8.30/test/test019.tcl b/db-4.8.30/test/test019.tcl
new file mode 100644
index 0000000..4e94356
--- /dev/null
+++ b/db-4.8.30/test/test019.tcl
@@ -0,0 +1,135 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test019
+# TEST Partial get test.
+proc test019 { method {nentries 10000} args } {
+ global fixed_len
+ global rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test019.db
+ set env NULL
+ } else {
+ set testfile test019.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test019: $method ($args) $nentries partial get test"
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ berkdb srand $rand_init
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest019.a: put/get loop"
+ for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \
+ { incr i } {
+
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key $str
+ }
+ set repl [berkdb random_int $fixed_len 100]
+ set data [chop_data $method [replicate $str $repl]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {-nooverwrite $key $data}]
+ error_check_good dbput:$key $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ dbget:$key $ret [list [list $key [pad_data $method $data]]]
+ set kvals($key) $repl
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ close $did
+
+ puts "\tTest019.b: partial get loop"
+ set did [open $dict]
+ for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \
+ { incr i } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key $str
+ }
+ set data [pad_data $method [replicate $str $kvals($key)]]
+
+ set maxndx [expr [string length $data] - 1]
+
+ if { $maxndx > 0 } {
+ set beg [berkdb random_int 0 [expr $maxndx - 1]]
+ set len [berkdb random_int 0 [expr $maxndx * 2]]
+ } else {
+ set beg 0
+ set len 0
+ }
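+ # beg and len may describe a range that runs past the end of the
+ # record; a partial get then returns only the bytes that exist,
+ # which is why the check below compares against the same
+ # [string range] truncation of the original data.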
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db get} \
+ $txn {-partial [list $beg $len]} $gflags {$key}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # In order for tcl to handle this, we have to overwrite the
+ # last character with a NULL. That makes the length one less
+ # than we expect.
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good dbget_key $k $key
+
+ error_check_good dbget_data $d \
+ [string range $data $beg [expr $beg + $len - 1]]
+
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
diff --git a/db-4.8.30/test/test020.tcl b/db-4.8.30/test/test020.tcl
new file mode 100644
index 0000000..7b5c6a0
--- /dev/null
+++ b/db-4.8.30/test/test020.tcl
@@ -0,0 +1,141 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test020
+# TEST In-Memory database tests.
+proc test020 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_queueext $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test020 skipping for method $method"
+ return
+ }
+
+ if { [is_partitioned $args] == 1 } {
+ puts "Test020 skipping for partitioned $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # Check if we are using an env.
+ if { $eindex == -1 } {
+ set env NULL
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test020: $method ($args) $nentries equal key/data pairs"
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod}]
+ error_check_good dbopen [is_valid_db $db] TRUE
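+ # No file name is passed to berkdb_open, so this is an in-memory
+ # (anonymous) database rather than a named file in $testdir.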
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test020_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test020.check
+ }
+ puts "\tTest020.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest020.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test020:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test020; keys and data are identical
+proc test020.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc test020_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "data mismatch: key $key" $data $kvals($key)
+}
diff --git a/db-4.8.30/test/test021.tcl b/db-4.8.30/test/test021.tcl
new file mode 100644
index 0000000..0a41c3c
--- /dev/null
+++ b/db-4.8.30/test/test021.tcl
@@ -0,0 +1,161 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test021
+# TEST Btree range tests.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self, reversed as key and self as data.
+# TEST After all are entered, retrieve each using a cursor SET_RANGE, and
+# TEST getting about 20 keys sequentially after it (in some cases we'll
+# TEST run out towards the end of the file).
+proc test021 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test021.db
+ set env NULL
+ } else {
+ set testfile test021.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test021: $method ($args) $nentries equal key/data pairs"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test021_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test021.check
+ }
+ puts "\tTest021.a: put loop"
+ # Here is the loop where we put each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key [reverse $str]
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good db_put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and retrieve about 20
+ # records after it.
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest021.b: test ranges"
+ set db [eval {berkdb_open -rdonly} $args $omethod $testfile ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ set did [open $dict]
+ set i 0
+ while { [gets $did str] != -1 && $i < $count } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key [reverse $str]
+ }
+
+ set r [$dbc get -set_range $key]
+ error_check_bad dbc_get:$key [string length $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ $checkfunc $k $d
+
+ for { set nrecs 0 } { $nrecs < 20 } { incr nrecs } {
+ set r [$dbc get "-next"]
+ # no error checking because we may run off the end
+ # of the database
+ if { [llength $r] == 0 } {
+ continue;
+ }
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ $checkfunc $k $d
+ }
+ incr i
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
+
+# Check function for test021; keys and data are reversed
+proc test021.check { key data } {
+ error_check_good "key/data mismatch for $key" $data [reverse $key]
+}
+
+proc test021_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "data mismatch: key $key" $data $kvals($key)
+}
diff --git a/db-4.8.30/test/test022.tcl b/db-4.8.30/test/test022.tcl
new file mode 100644
index 0000000..9e9562b
--- /dev/null
+++ b/db-4.8.30/test/test022.tcl
@@ -0,0 +1,61 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test022
+# TEST Test of DB->getbyteswapped().
+proc test022 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test022 ($args) $omethod: DB->getbyteswapped()"
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile1 "$testdir/test022a.db"
+ set testfile2 "$testdir/test022b.db"
+ set env NULL
+ } else {
+ set testfile1 "test022a.db"
+ set testfile2 "test022b.db"
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Create two databases, one in each byte order.
+ set db1 [eval {berkdb_open -create \
+ -mode 0644} $omethod $args {-lorder 1234} $testfile1]
+ error_check_good db1_open [is_valid_db $db1] TRUE
+
+ set db2 [eval {berkdb_open -create \
+ -mode 0644} $omethod $args {-lorder 4321} $testfile2]
+ error_check_good db2_open [is_valid_db $db2] TRUE
+
+ # Call DB->get_byteswapped on both of them.
+ set db1_order [$db1 is_byteswapped]
+ set db2_order [$db2 is_byteswapped]
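+ # -lorder 1234 stores the database little-endian and 4321 stores
+ # it big-endian; is_byteswapped reports 1 only when the database
+ # byte order differs from the host's, so exactly one of the two
+ # answers should be 1 on any machine.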
+
+ # Make sure that both answers are either 1 or 0,
+ # and that exactly one of them is 1.
+ error_check_good is_byteswapped_sensible_1 \
+ [expr ($db1_order == 1 && $db2_order == 0) || \
+ ($db1_order == 0 && $db2_order == 1)] 1
+
+ error_check_good db1_close [$db1 close] 0
+ error_check_good db2_close [$db2 close] 0
+ puts "\tTest022 complete."
+}
diff --git a/db-4.8.30/test/test023.tcl b/db-4.8.30/test/test023.tcl
new file mode 100644
index 0000000..d8696bb
--- /dev/null
+++ b/db-4.8.30/test/test023.tcl
@@ -0,0 +1,225 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test023
+# TEST Duplicate test
+# TEST Exercise deletes and cursor operations within a duplicate set.
+# TEST Add a key with duplicates (first time on-page, second time off-page)
+# TEST Number the dups.
+# TEST Delete dups and make sure that CURRENT/NEXT/PREV work correctly.
+proc test023 { method args } {
+ global alphabet
+ global dupnum
+ global dupstr
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test023 skipping for btree with compression."
+ return
+ }
+
+ puts "Test023: $method delete duplicates/check cursor operations"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test023: skipping for method $omethod"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test023.db
+ set env NULL
+ } else {
+ set testfile test023.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ foreach i { onpage offpage } {
+ if { $i == "onpage" } {
+ set dupstr DUP
+ } else {
+ set dupstr [repeat $alphabet 50]
+ }
+ puts "\tTest023.a: Insert key w/$i dups"
+ set key "duplicate_val_test"
+ for { set count 0 } { $count < 20 } { incr count } {
+ set ret \
+ [eval {$db put} $txn $pflags {$key $count$dupstr}]
+ error_check_good db_put $ret 0
+ }
+
+ # Now let's get all the items and make sure they look OK.
+ puts "\tTest023.b: Check initial duplicates"
+ set dupnum 0
+ dump_file $db $txn $t1 test023.check
+
+ # Delete a few items (FIRST, LAST, and one in the middle).
+ # Make sure that current returns an error and that NEXT and
+ # PREV do the right things.
+
+ set ret [$dbc get -set $key]
+ error_check_bad dbc_get:SET [llength $ret] 0
+
+ puts "\tTest023.c: Delete first and try gets"
+ # This should be the first duplicate
+ error_check_good \
+ dbc_get:SET $ret [list [list duplicate_val_test 0$dupstr]]
+
+ # Now delete it.
+ set ret [$dbc del]
+ error_check_good dbc_del:FIRST $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good dbc_get:CURRENT $ret ""
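+ # A -current get on a deleted item comes back empty (DB_KEYEMPTY
+ # under the covers) rather than raising a Tcl error.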
+
+ # Now Prev should fail
+ set ret [$dbc get -prev]
+ error_check_good dbc_get:prev0 [llength $ret] 0
+
+ # Now 10 nexts should work to get us in the middle
+ for { set j 1 } { $j <= 10 } { incr j } {
+ set ret [$dbc get -next]
+ error_check_good \
+ dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] $j$dupstr
+ }
+
+ puts "\tTest023.d: Delete middle and try gets"
+ # Now do the delete on the current key.
+ set ret [$dbc del]
+ error_check_good dbc_del:10 $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good dbc_get:deleted $ret ""
+
+ # Prev and Next should work
+ set ret [$dbc get -next]
+ error_check_good dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] 11$dupstr
+
+ set ret [$dbc get -prev]
+ error_check_good dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] 9$dupstr
+
+ # Now go to the last one
+ for { set j 11 } { $j <= 19 } { incr j } {
+ set ret [$dbc get -next]
+ error_check_good \
+ dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] $j$dupstr
+ }
+
+ puts "\tTest023.e: Delete last and try gets"
+ # Now do the delete on the current key.
+ set ret [$dbc del]
+ error_check_good dbc_del:LAST $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good dbc_get:deleted $ret ""
+
+ # Next should fail
+ set ret [$dbc get -next]
+ error_check_good dbc_get:next19 [llength $ret] 0
+
+ # Prev should work
+ set ret [$dbc get -prev]
+ error_check_good dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] 18$dupstr
+
+ # Now overwrite the current one, then count the number
+ # of data items to make sure that we have the right number.
+
+ puts "\tTest023.f: Count keys, overwrite current, count again"
+ # At this point we should have 17 data items (the initial 20
+ # minus 3 deletes).
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor:2 [is_substr $dbc2 $db] 1
+
+ set count_check 0
+ for { set rec [$dbc2 get -first] } {
+ [llength $rec] != 0 } { set rec [$dbc2 get -next] } {
+ incr count_check
+ }
+ error_check_good numdups $count_check 17
+
+ set ret [$dbc put -current OVERWRITE]
+ error_check_good dbc_put:current $ret 0
+
+ set count_check 0
+ for { set rec [$dbc2 get -first] } {
+ [llength $rec] != 0 } { set rec [$dbc2 get -next] } {
+ incr count_check
+ }
+ error_check_good numdups $count_check 17
+ error_check_good dbc2_close [$dbc2 close] 0
+
+ # Done, delete all the keys for next iteration
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_delete $ret 0
+
+ # database should be empty
+
+ set ret [$dbc get -first]
+ error_check_good first_after_empty [llength $ret] 0
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+}
+
+ # Check function for test023; each datum is its dup number followed by dupstr
+proc test023.check { key data } {
+ global dupnum
+ global dupstr
+ error_check_good "bad key" $key duplicate_val_test
+ error_check_good "data mismatch for $key" $data $dupnum$dupstr
+ incr dupnum
+}
diff --git a/db-4.8.30/test/test024.tcl b/db-4.8.30/test/test024.tcl
new file mode 100644
index 0000000..17bf537
--- /dev/null
+++ b/db-4.8.30/test/test024.tcl
@@ -0,0 +1,276 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test024
+# TEST Record number retrieval test.
+# TEST Test the Btree and Record number get-by-number functionality.
+proc test024 { method {nentries 10000} args} {
+ source ./include.tcl
+ global rand_init
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test024: $method ($args)"
+
+ # Btree with compression does not support -recnum.
+ if { [is_compressed $args] == 1 } {
+ puts "Test024 skipping for compressed btree with -recnum."
+ return
+ }
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test024 skipping for method HASH"
+ return
+ }
+ if { [is_partitioned $args] } {
+ puts "Test024 skipping for partitioned $omethod"
+ return
+ }
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test024.db
+ set env NULL
+ } else {
+ set testfile test024.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ cleanup $testdir $env
+
+ # Read the first nentries dictionary elements and reverse them.
+ # Keep a list of these (these will be the keys).
+ puts "\tTest024.a: initialization"
+ set keys ""
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys [reverse $str]
+ incr count
+ }
+ close $did
+
+ # Generate sorted order for the keys
+ set sorted_keys [lsort $keys]
+ # Create the database
+ if { [string compare $omethod "-btree"] == 0 } {
+ set db [eval {berkdb_open -create \
+ -mode 0644 -recnum} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ } else {
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set gflags " -recno"
+ }
+
+ puts "\tTest024.b: put/get loop"
+ foreach k $keys {
+ if { [is_record_based $method] == 1 } {
+ set key [lsearch $sorted_keys $k]
+ incr key
+ } else {
+ set key $k
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $k]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $k]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest024.c: dump file"
+
+ # Put sorted keys in file
+ set oid [open $t1 w]
+ foreach k $sorted_keys {
+ puts $oid [pad_data $method $k]
+ }
+ close $oid
+
+ # Instead of using dump_file, get all the keys by record number
+ set oid [open $t2 w]
+ if { [string compare $omethod "-btree"] == 0 } {
+ set do_renumber 1
+ }
+
+ set gflags " -recno"
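+ # On a -recnum btree, a get with -recno treats the key as a
+ # record number: record k is the k-th key in sorted order, which
+ # is what the loop below verifies against sorted_keys.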
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set k 1 } { $k <= $count } { incr k } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ close $oid
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ error_check_good Test024.c:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest024.d: close, open, and dump file"
+ set db [eval {berkdb_open -rdonly} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set oid [open $t2 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set k 1 } { $k <= $count } { incr k } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $oid
+ error_check_good db_close [$db close] 0
+ error_check_good Test024.d:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest024.e: close, open, and dump file in reverse direction"
+ set db [eval {berkdb_open -rdonly} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ # Put sorted keys in file
+ set rsorted ""
+ foreach k $sorted_keys {
+ set rsorted [linsert $rsorted 0 $k]
+ }
+ set oid [open $t1 w]
+ foreach k $rsorted {
+ puts $oid [pad_data $method $k]
+ }
+ close $oid
+
+ set oid [open $t2 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set k $count } { $k > 0 } { incr k -1 } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $oid
+ error_check_good db_close [$db close] 0
+ error_check_good Test024.e:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now try deleting elements and making sure they work
+ puts "\tTest024.f: delete test"
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ while { $count > 0 } {
+ set kndx [berkdb random_int 1 $count]
+ set kval [lindex $keys [expr $kndx - 1]]
+ set recno [expr [lsearch $sorted_keys $kval] + 1]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_record_based $method] == 1 } {
+ set ret [eval {$db del} $txn {$recno}]
+ } else {
+ set ret [eval {$db del} $txn {$kval}]
+ }
+ error_check_good delete $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Remove the key from the key list
+ set ndx [expr $kndx - 1]
+ set keys [lreplace $keys $ndx $ndx]
+
+ if { $do_renumber == 1 } {
+ set r [expr $recno - 1]
+ set sorted_keys [lreplace $sorted_keys $r $r]
+ }
+
+ # Check that the keys after it have been renumbered
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { $do_renumber == 1 && $recno != $count } {
+ set r [expr $recno - 1]
+ set ret [eval {$db get} $txn $gflags {$recno}]
+ error_check_good get_after_del \
+ [lindex [lindex $ret 0] 1] [lindex $sorted_keys $r]
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Decrement count
+ incr count -1
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test025.tcl b/db-4.8.30/test/test025.tcl
new file mode 100644
index 0000000..011a15e
--- /dev/null
+++ b/db-4.8.30/test/test025.tcl
@@ -0,0 +1,145 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test025
+# TEST DB_APPEND flag test.
+proc test025 { method {nentries 10000} {start 0 } {tnum "025"} args} {
+ global kvals
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ puts "Test$tnum: $method ($args)"
+
+ if { [string compare $omethod "-btree"] == 0 } {
+ puts "Test$tnum skipping for method BTREE"
+ return
+ }
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test$tnum skipping for method HASH"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ puts "\tTest$tnum.a: put/get loop"
+ set gflags " -recno"
+ set pflags " -append"
+ set txn ""
+ set checkfunc test025_check
+
+ # Here is the loop where we put and get each key/data pair
+ set count $start
+ set nentries [expr $start + $nentries]
+ if { $count != 0 } {
+ gets $did str
+ set k [expr $count + 1]
+ set kvals($k) [pad_data $method $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$k [chop_data $method $str]}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set k [expr $count + 1]
+ set kvals($k) [pad_data $method $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {[chop_data $method $str]}]
+ error_check_good db_put $ret $k
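+ # With -append the put call returns the record number that DB
+ # allocated, so it should equal the key value predicted above.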
+
+ set ret [eval {$db get} $txn $gflags {$k}]
+ error_check_good \
+ get $ret [list [list $k [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # The recno key will be count + 1, so when we hit
+ # UINT32_MAX - 1, reset to 0.
+ if { $count == [expr 0xfffffffe] } {
+ set count 0
+ } else {
+ incr count
+ }
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tnum.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ eval open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction -first -next $args
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest$tnum.d: close, open, and dump file in reverse direction"
+ eval open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction -last -prev $args
+}
+
+proc test025_check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good " key/data mismatch for |$key|" $data $kvals($key)
+}
diff --git a/db-4.8.30/test/test026.tcl b/db-4.8.30/test/test026.tcl
new file mode 100644
index 0000000..234554c
--- /dev/null
+++ b/db-4.8.30/test/test026.tcl
@@ -0,0 +1,159 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test026
+# TEST Small keys/medium data w/duplicates
+# TEST Put/get per key.
+# TEST Loop through keys -- delete each key
+# TEST ... test that cursors delete duplicates correctly
+# TEST
+ # TEST Keyed delete test through cursor. If ndups is small, this will
+ # TEST test on-page dups; if it's large, it will test off-page dups.
+proc test026 { method {nentries 2000} {ndups 5} {tnum "026"} args} {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test$tnum skipping for btree with compression."
+ return
+ }
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test$tnum skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the defaults down a bit.
+ # If we want a lot of dups, reduce that as well,
+ # or replication testing takes a very long time.
+ #
+ if { $nentries == 2000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ puts "Test$tnum: $method ($args) $nentries keys\
+ with $ndups dups; cursor delete test"
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+
+ puts "\tTest$tnum.a: Put loop"
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod -dup $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < [expr $nentries * $ndups] } {
+ set datastr [ make_data_str $str ]
+ for { set j 1 } { $j <= $ndups} {incr j} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $j$datastr]}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ }
+ close $did
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Now we will sequentially traverse the database getting each
+ # item and deleting it.
+ set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ puts "\tTest$tnum.b: Get/delete loop"
+ set i 1
+ for { set ret [$dbc get -first] } {
+ [string length $ret] != 0 } {
+ set ret [$dbc get -next] } {
+
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ if { $i == 1 } {
+ set curkey $key
+ }
+ error_check_good seq_get:key $key $curkey
+ error_check_good \
+ seq_get:data $data [pad_data $method $i[make_data_str $key]]
+
+ if { $i == $ndups } {
+ set i 1
+ } else {
+ incr i
+ }
+
+ # Now delete the key
+ set ret [$dbc del]
+ error_check_good db_del:$key $ret 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tnum.c: Verify empty file"
+ # Double check that file is now empty
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+ set ret [$dbc get -first]
+ error_check_good get_on_empty [string length $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test027.tcl b/db-4.8.30/test/test027.tcl
new file mode 100644
index 0000000..2aafaf8
--- /dev/null
+++ b/db-4.8.30/test/test027.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test027
+# TEST Off-page duplicate test
+# TEST Test026 with parameters to force off-page duplicates.
+# TEST
+ # TEST Check that delete operations work. Create a database; close
+ # TEST the database and reopen it. Then issue a delete by key for
+ # TEST each entry.
+proc test027 { method {nentries 100} args} {
+ eval {test026 $method $nentries 100 "027"} $args
+}
diff --git a/db-4.8.30/test/test028.tcl b/db-4.8.30/test/test028.tcl
new file mode 100644
index 0000000..f0abf1f
--- /dev/null
+++ b/db-4.8.30/test/test028.tcl
@@ -0,0 +1,224 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test028
+# TEST Cursor delete test
+# TEST Test put operations after deleting through a cursor.
+proc test028 { method args } {
+ global dupnum
+ global dupstr
+ global alphabet
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test028: $method put after cursor delete test"
+
+ if { [is_rbtree $method] == 1 } {
+ puts "Test028 skipping for method $method"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ set key 10
+ } else {
+ set key "put_after_cursor_del"
+ if { [is_compressed $args] == 0 } {
+ append args " -dup"
+ }
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test028.db
+ set env NULL
+ } else {
+ set testfile test028.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set ndups 20
+ set txn ""
+ set pflags ""
+ set gflags ""
+
+ if { [is_record_based $method] == 1 } {
+ set gflags " -recno"
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ foreach i { offpage onpage } {
+ foreach b { bigitem smallitem } {
+ if { $i == "onpage" } {
+ if { $b == "bigitem" } {
+ set dupstr [repeat $alphabet 100]
+ } else {
+ set dupstr DUP
+ }
+ } else {
+ if { $b == "bigitem" } {
+ set dupstr [repeat $alphabet 100]
+ } else {
+ set dupstr [repeat $alphabet 50]
+ }
+ }
+
+ if { $b == "bigitem" } {
+ set dupstr [repeat $dupstr 10]
+ }
+ puts "\tTest028: $i/$b"
+
+ puts "\tTest028.a: Insert key with single data item"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $dupstr]}]
+ error_check_good db_put $ret 0
+
+ # Now let's get the item and make sure it's OK.
+ puts "\tTest028.b: Check initial entry"
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get \
+ $ret [list [list $key [pad_data $method $dupstr]]]
+
+ # Now try a put with NOOVERWRITE set (should be an error)
+ puts "\tTest028.c: No_overwrite test"
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key [chop_data $method $dupstr]}]
+ error_check_good \
+ db_put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Now delete the item with a cursor
+ puts "\tTest028.d: Delete test"
+ set ret [$dbc get -set $key]
+ error_check_bad dbc_get:SET [llength $ret] 0
+
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+
+ puts "\tTest028.e: Reput the item"
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key [chop_data $method $dupstr]}]
+ error_check_good db_put $ret 0
+
+ puts "\tTest028.f: Retrieve the item"
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get $ret \
+ [list [list $key [pad_data $method $dupstr]]]
+
+ # Delete the key to set up for next test
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+
+ # Now repeat the above set of tests with
+ # duplicates (if not RECNO).
+ if { [is_record_based $method] == 1 ||\
+ [is_compressed $args] == 1 } {
+ continue;
+ }
+
+ puts "\tTest028.g: Insert key with duplicates"
+ for { set count 0 } { $count < $ndups } { incr count } {
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $count$dupstr]}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest028.h: Check dups"
+ set dupnum 0
+ dump_file $db $txn $t1 test028.check
+
+ # Try no_overwrite
+ puts "\tTest028.i: No_overwrite test"
+ set ret [eval {$db put} \
+ $txn {-nooverwrite $key $dupstr}]
+ error_check_good \
+ db_put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Now delete all the elements with a cursor
+ puts "\tTest028.j: Cursor Deletes"
+ set count 0
+ for { set ret [$dbc get -set $key] } {
+ [string length $ret] != 0 } {
+ set ret [$dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good db_seq(key) $k $key
+ error_check_good db_seq(data) $d $count$dupstr
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+ incr count
+ if { $count == [expr $ndups - 1] } {
+ puts "\tTest028.k:\
+ Duplicate No_Overwrite test"
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key $dupstr}]
+ error_check_good db_put [is_substr \
+ $ret "DB_KEYEXIST"] 1
+ }
+ }
+
+ # Make sure all the items are gone
+ puts "\tTest028.l: Get after delete"
+ set ret [$dbc get -set $key]
+ error_check_good get_after_del [string length $ret] 0
+
+ puts "\tTest028.m: Reput the item"
+ set ret [eval {$db put} \
+ $txn {-nooverwrite $key 0$dupstr}]
+ error_check_good db_put $ret 0
+ for { set count 1 } { $count < $ndups } { incr count } {
+ set ret [eval {$db put} $txn \
+ {$key $count$dupstr}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest028.n: Retrieve the item"
+ set dupnum 0
+ dump_file $db $txn $t1 test028.check
+
+ # Clean out in prep for next test
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+ }
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+}
+
+# Check function for test028; verifies the key and the numbered duplicate data
+proc test028.check { key data } {
+ global dupnum
+ global dupstr
+ error_check_good "Bad key" $key put_after_cursor_del
+ error_check_good "data mismatch for $key" $data $dupnum$dupstr
+ incr dupnum
+}
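# A minimal standalone sketch (not part of the patch above) of the pattern
# test028 exercises, assuming the Berkeley DB Tcl package is loaded and the
# working directory is writable: a -nooverwrite put fails while the key is
# present, and succeeds again once the item is deleted through a cursor.
set db [berkdb_open -create -btree sketch028.db]
$db put mykey mydata
puts [$db put -nooverwrite mykey other]   ;# should report DB_KEYEXIST
set dbc [$db cursor]
$dbc get -set mykey
$dbc del                                  ;# delete the item at the cursor
$dbc close
puts [$db put -nooverwrite mykey mydata]  ;# should now succeed (0)
$db close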
diff --git a/db-4.8.30/test/test029.tcl b/db-4.8.30/test/test029.tcl
new file mode 100644
index 0000000..b6d73ae
--- /dev/null
+++ b/db-4.8.30/test/test029.tcl
@@ -0,0 +1,255 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test029
+# TEST Test the Btree and Record number renumbering.
+proc test029 { method {nentries 10000} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test029: $method ($args)"
+
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test029 skipping for method HASH"
+ return
+ }
+ # Btree with compression does not support -recnum.
+ if { [is_compressed $args] == 1 } {
+ puts "Test029 skipping for compressed btree with -recnum."
+ return
+ }
+ if { [is_partitioned $args] } {
+ puts "Test029 skipping for partitioned $omethod"
+ return
+ }
+ if { [is_record_based $method] == 1 && $do_renumber != 1 } {
+ puts "Test029 skipping for method RECNO (w/out renumbering)"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test029.db
+ set env NULL
+ } else {
+ set testfile test029.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ # Do not set nentries down to 100 until we
+ # fix SR #5958.
+ set nentries 1000
+ }
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Read the first nentries dictionary elements and reverse them.
+ # Keep a list of these (these will be the keys).
+ puts "\tTest029.a: initialization"
+ set keys ""
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys [reverse $str]
+ incr count
+ }
+ close $did
+
+ # Generate sorted order for the keys
+ set sorted_keys [lsort $keys]
+
+ # Save the first and last keys
+ set last_key [lindex $sorted_keys end]
+ set last_keynum [llength $sorted_keys]
+
+ set first_key [lindex $sorted_keys 0]
+ set first_keynum 1
+
+ # Create the database
+ if { [string compare $omethod "-btree"] == 0 } {
+ set db [eval {berkdb_open -create \
+ -mode 0644 -recnum} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ } else {
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest029.b: put/get loop"
+ foreach k $keys {
+ if { [is_record_based $method] == 1 } {
+ set key [lsearch $sorted_keys $k]
+ incr key
+ } else {
+ set key $k
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $k]}]
+ error_check_good dbput $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good dbget [lindex [lindex $ret 0] 1] $k
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now delete the first key in the database
+ puts "\tTest029.c: delete and verify renumber"
+
+ # Delete the first key in the file
+ if { [is_record_based $method] == 1 } {
+ set key $first_keynum
+ } else {
+ set key $first_key
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now we are ready to retrieve records based on
+ # record number
+ if { [string compare $omethod "-btree"] == 0 } {
+ append gflags " -recno"
+ }
+
+ # First try to get the old last key (shouldn't exist)
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good get_after_del $ret [list]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now try to get what we think should be the last key
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}]
+ error_check_good \
+ getn_last_after_del [lindex [lindex $ret 0] 1] $last_key
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Create a cursor; we need it for the next test and we
+ # need it for recno here.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # OK, now re-put the first key and make sure that we
+ # renumber the last key appropriately.
+ if { [string compare $omethod "-btree"] == 0 } {
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $first_key]}]
+ error_check_good db_put $ret 0
+ } else {
+ # Recno
+ set ret [$dbc get -first]
+ set ret [eval {$dbc put} $pflags {-before $first_key}]
+ error_check_bad dbc_put:DB_BEFORE $ret 0
+ }
+
+ # Now check that the last record matches the last record number
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good \
+ getn_last_after_put [lindex [lindex $ret 0] 1] $last_key
+
+ # Now delete the first key in the database using a cursor
+ puts "\tTest029.d: delete with cursor and verify renumber"
+
+ set ret [$dbc get -first]
+ error_check_good dbc_first $ret [list [list $key $first_key]]
+
+ # Now delete at the cursor
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+
+ # Now check the record numbers of the last keys again.
+ # First try to get the old last key (shouldn't exist)
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good get_last_after_cursor_del:$ret $ret [list]
+
+ # Now try to get what we think should be the last key
+ set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}]
+ error_check_good \
+ getn_after_cursor_del [lindex [lindex $ret 0] 1] $last_key
+
+ # Re-put the first key and make sure that we renumber the last
+ # key appropriately. We can't do a c_put -current, so do
+ # a db put instead.
+ if { [string compare $omethod "-btree"] == 0 } {
+ puts "\tTest029.e: put (non-cursor) and verify renumber"
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $first_key]}]
+ error_check_good db_put $ret 0
+ } else {
+ puts "\tTest029.e: put with cursor and verify renumber"
+ set ret [eval {$dbc put} $pflags {-before $first_key}]
+ error_check_bad dbc_put:DB_BEFORE $ret 0
+ }
+
+ # Now check that the last record matches the last record number
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good \
+ get_after_cursor_reput [lindex [lindex $ret 0] 1] $last_key
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
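# A rough standalone sketch (outside the patch) of the record-number lookups
# test029 verifies, assuming the Berkeley DB Tcl package is loaded: a btree
# opened with -recnum can be read by record number, and deleting a key
# shifts the numbering of the keys that follow it.
set db [berkdb_open -create -recnum -btree sketch029.db]
foreach k {apple banana cherry} { $db put $k $k }
puts [$db get -recno 3]   ;# should return "cherry", the last record
$db del apple             ;# remove the first key ...
puts [$db get -recno 2]   ;# ... so "cherry" should now be record 2
$db close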
diff --git a/db-4.8.30/test/test030.tcl b/db-4.8.30/test/test030.tcl
new file mode 100644
index 0000000..1911583
--- /dev/null
+++ b/db-4.8.30/test/test030.tcl
@@ -0,0 +1,259 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test030
+# TEST Test DB_NEXT_DUP Functionality.
+proc test030 { method {nentries 10000} args } {
+ global rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test030 skipping for btree with compression."
+ return
+ }
+ if { [is_record_based $method] == 1 ||
+ [is_rbtree $method] == 1 } {
+ puts "Test030 skipping for method $method"
+ return
+ }
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test030.db
+ set cntfile $testdir/cntfile.db
+ set env NULL
+ } else {
+ set testfile test030.db
+ set cntfile cntfile.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test030: $method ($args) $nentries DB_NEXT_DUP testing"
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create \
+ -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Use a second DB to keep track of how many duplicates
+ # we enter per key
+
+ set cntdb [eval {berkdb_open -create \
+ -mode 0644} $args {-btree $cntfile}]
+ error_check_good dbopen:cntfile [is_valid_db $cntdb] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ # We will add between 1 and 10 dups with values 1 ... dups
+ # We'll verify each addition.
+
+ set did [open $dict]
+ puts "\tTest030.a: put and get duplicate keys."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set ndup [berkdb random_int 1 10]
+
+ for { set i 1 } { $i <= $ndup } { incr i 1 } {
+ set ctxn ""
+ if { $txnenv == 1 } {
+ set ct [$env txn]
+ error_check_good txn \
+ [is_valid_txn $ct $env] TRUE
+ set ctxn "-txn $ct"
+ }
+ set ret [eval {$cntdb put} \
+ $ctxn $pflags {$str [chop_data $method $ndup]}]
+ error_check_good put_cnt $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$ct commit] 0
+ }
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+
+ if { [llength $ret] == 0 } {
+ break
+ }
+ incr x
+
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ error_check_good Test030:put $d $str
+
+ set id [ id_of $datastr ]
+ error_check_good Test030:dup# $id $x
+ }
+ error_check_good Test030:numdups $x $ndup
+
+ # Now retrieve them backwards
+ for {set ret [$dbc get -prev]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -prevdup] } {
+
+ if { [llength $ret] == 0 } {
+ break
+ }
+
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ incr x -1
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ error_check_good Test030:put $d $str
+
+ set id [ id_of $datastr ]
+ error_check_good Test030:dup# $id $x
+ }
+ error_check_good Test030:numdups $x 1
+ incr count
+ }
+ close $did
+
+ # Verify on sequential pass of entire file
+ puts "\tTest030.b: sequential check"
+
+ # We can't just set lastkey to a null string, since that might
+ # be a key now!
+ set lastkey "THIS STRING WILL NEVER BE A KEY"
+
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next] } {
+
+ # Outer loop should always get a new key
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_bad outer_get_loop:key $k $lastkey
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good outer_get_loop:data $d $k
+ error_check_good outer_get_loop:id $id 1
+
+ set lastkey $k
+ # Figure out how many dups we should have
+ if { $txnenv == 1 } {
+ set ct [$env txn]
+ error_check_good txn [is_valid_txn $ct $env] TRUE
+ set ctxn "-txn $ct"
+ }
+ set ret [eval {$cntdb get} $ctxn $pflags {$k}]
+ set ndup [lindex [lindex $ret 0] 1]
+ if { $txnenv == 1 } {
+ error_check_good txn [$ct commit] 0
+ }
+
+ set howmany 1
+ for { set ret [$dbc get -nextdup] } \
+ { [llength $ret] != 0 } \
+ { set ret [$dbc get -nextdup] } {
+ incr howmany
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good inner_get_loop:key $k $lastkey
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good inner_get_loop:data $d $k
+ error_check_good inner_get_loop:id $id $howmany
+
+ }
+ error_check_good ndups_found $howmany $ndup
+ }
+
+ # Verify on key lookup
+ puts "\tTest030.c: keyed check"
+ set cnt_dbc [$cntdb cursor]
+ for {set ret [$cnt_dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$cnt_dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+
+ set howmany [lindex [lindex $ret 0] 1]
+ error_check_bad cnt_seq:data [string length $howmany] 0
+
+ set i 0
+ for {set ret [$dbc get -set $k]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ incr i
+
+ set k [lindex [lindex $ret 0] 0]
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good inner_get_loop:data $d $k
+ error_check_good inner_get_loop:id $id $i
+ }
+ error_check_good keyed_count $i $howmany
+
+ }
+ error_check_good cnt_curs_close [$cnt_dbc close] 0
+ error_check_good db_curs_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good cnt_file_close [$cntdb close] 0
+ error_check_good db_file_close [$db close] 0
+}
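# A small standalone sketch (not part of the patch) of -nextdup traversal,
# the flag test030 is built around, assuming the Berkeley DB Tcl package is
# loaded: the cursor stays on the current key and returns an empty list once
# that key's duplicates are exhausted.
set db [berkdb_open -create -dup -btree sketch030.db]
foreach d {one two three} { $db put fruit $d }
$db put veg carrot
set dbc [$db cursor]
for {set ret [$dbc get -set fruit]} {[llength $ret] != 0} \
    {set ret [$dbc get -nextdup]} {
	puts $ret   ;# should print only the three "fruit" duplicates
}
$dbc close
$db close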
diff --git a/db-4.8.30/test/test031.tcl b/db-4.8.30/test/test031.tcl
new file mode 100644
index 0000000..3ed6e95
--- /dev/null
+++ b/db-4.8.30/test/test031.tcl
@@ -0,0 +1,234 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test031
+# TEST Duplicate sorting functionality
+# TEST Make sure DB_NODUPDATA works.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and "ndups" duplicates.
+# TEST For the data field, prepend random five-char strings (see test032)
+# TEST so that we force the duplicate sorting code to do something.
+# TEST Along the way, test that we cannot insert duplicate duplicates
+# TEST using DB_NODUPDATA.
+# TEST
+# TEST By setting ndups large, we can make this an off-page test
+# TEST After all are entered, retrieve all; verify output.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST This does not work for recno
+proc test031 { method {nentries 10000} {ndups 5} {tnum "031"} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set checkargs [split_partition_args $args]
+
+ # The checkdb is of type hash so it can't use compression.
+ set checkargs [strip_compression_args $checkargs]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append checkargs " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test$tnum: \
+ $method ($args) $nentries small $ndups sorted dup key/data pairs"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test$tnum skipping for method $omethod"
+ return
+ }
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod -dup -dupsort $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval {berkdb_open \
+ -create -mode 0644} $checkargs {-hash $checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest$tnum.a: Put/get loop, check nodupdata"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Re-initialize random string generator
+ randstring_init $ndups
+
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref [randstring]
+ set dups $dups$pref
+ set datastr $pref:$str
+ if { $i == 2 } {
+ set nodupstr $datastr
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+
+ # Test DB_NODUPDATA using the DB handle
+ set ret [eval {$db put -nodupdata} \
+ $txn $pflags {$str [chop_data $method $nodupstr]}]
+ error_check_good db_nodupdata [is_substr $ret "DB_KEYEXIST"] 1
+
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ # Test DB_NODUPDATA using cursor handle
+ set ret [$dbc get -set $str]
+ error_check_bad dbc_get [llength $ret] 0
+ set datastr [lindex [lindex $ret 0] 1]
+ error_check_bad dbc_data [string length $datastr] 0
+ set ret [eval {$dbc put -nodupdata} \
+ {$str [chop_data $method $datastr]}]
+ error_check_good dbc_nodupdata [is_substr $ret "DB_KEYEXIST"] 1
+
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare \
+ $lastdup [pad_data $method $datastr]] > 0} {
+ error_check_good \
+ sorted_dups($lastdup,$datastr) 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+ error_check_good "Test$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest$tnum.b: Checking file for correct duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open(2) [is_valid_cursor $dbc $db] TRUE
+
+ set lastkey "THIS WILL NEVER BE A KEY VALUE"
+ # no need to delete $lastkey
+ set firsttimethru 1
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ if { [string compare $k $lastkey] != 0 } {
+ # Remove last key from the checkdb
+ if { $firsttimethru != 1 } {
+ error_check_good check_db:del:$lastkey \
+ [eval {$check_db del} $txn {$lastkey}] 0
+ }
+ set firsttimethru 0
+ set lastdup ""
+ set lastkey $k
+ set dups [lindex [lindex [eval {$check_db get} \
+ $txn {$k}] 0] 1]
+ error_check_good check_db:get:$k \
+ [string length $dups] [expr $ndups * 4]
+ }
+
+ if { [string compare $lastdup $d] > 0 } {
+ error_check_good dup_check:$k:$d 0 1
+ }
+ set lastdup $d
+
+ set pref [string range $d 0 3]
+ set ndx [string first $pref $dups]
+ error_check_good valid_duplicate [expr $ndx >= 0] 1
+ set a [string range $dups 0 [expr $ndx - 1]]
+ set b [string range $dups [expr $ndx + 4] end]
+ set dups $a$b
+ }
+ # Remove last key from the checkdb
+ if { [string length $lastkey] != 0 } {
+ error_check_good check_db:del:$lastkey \
+ [eval {$check_db del} $txn {$lastkey}] 0
+ }
+
+ # Make sure there is nothing left in check_db
+
+ set check_c [eval {$check_db cursor} $txn]
+ set ret [$check_c get -first]
+ error_check_good check_c:get:$ret [llength $ret] 0
+ error_check_good check_c:close [$check_c close] 0
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
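# A hedged standalone sketch (outside the patch) of the -nodupdata check in
# test031, assuming the Berkeley DB Tcl package is loaded: with -dup -dupsort,
# re-adding an existing duplicate under -nodupdata is rejected with
# DB_KEYEXIST, while a genuinely new duplicate is accepted.
set db [berkdb_open -create -dup -dupsort -btree sketch031.db]
$db put color red
puts [$db put -nodupdata color red]    ;# should report DB_KEYEXIST
puts [$db put -nodupdata color blue]   ;# should succeed (0), new duplicate
$db close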
diff --git a/db-4.8.30/test/test032.tcl b/db-4.8.30/test/test032.tcl
new file mode 100644
index 0000000..7bffa13
--- /dev/null
+++ b/db-4.8.30/test/test032.tcl
@@ -0,0 +1,266 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test032
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test. By setting overflow to be 1, we can make
+# TEST this an overflow test.
+# TEST
+# TEST Test the DB_GET_BOTH functionality by retrieving each dup in the file
+# TEST explicitly. Test the DB_GET_BOTH_RANGE functionality by retrieving
+# TEST the unique key prefix (cursor only). Finally test the failure case.
+proc test032 { method {nentries 10000} {ndups 5} {tnum "032"}
+ {overflow 0} args } {
+ global alphabet rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set checkargs [split_partition_args $args]
+
+ # The checkdb is of type hash so it can't use compression.
+ set checkargs [strip_compression_args $checkargs]
+ set omethod [convert_method $method]
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append checkargs " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set dataset "small"
+ if {$overflow != 0} {
+ set dataset "large"
+ }
+ puts "Test$tnum:\
+ $method ($args) $nentries $dataset sorted $ndups dup key/data pairs"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test$tnum skipping for method $omethod"
+ return
+ }
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup -dupsort} $args {$testfile} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval {berkdb_open \
+ -create -mode 0644} $checkargs {-hash $checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+ set len 4
+
+ #
+ # Find the pagesize if we are testing with overflow pages. We will
+ # use the pagesize to build overflow items of the correct size.
+ #
+ if {$overflow != 0} {
+ set stat [$db stat]
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set len $pg
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Re-initialize random string generator
+ randstring_init $ndups
+
+ set dups ""
+ set prefix ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set prefix [randstring]
+
+ #
+ # Pad the data string so that overflow data items
+ # are large enough to generate overflow pages.
+ #
+ for { set j 1} { $j <= [expr $len / 4 - 1] } \
+ { incr j } {
+ append prefix "!@#$"
+ }
+
+ set dups $dups$prefix
+ set datastr $prefix:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare $lastdup $datastr] > 0} {
+ error_check_good \
+ sorted_dups($lastdup,$datastr) 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+
+ error_check_good "Test$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest$tnum.b: Checking file for correct duplicates (no cursor)"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good check_c_open(2) \
+ [is_valid_cursor $check_c $check_db] TRUE
+
+ for {set ndx 0} {$ndx < [expr $len * $ndups]} {incr ndx $len} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set prefix [string range $d $ndx \
+ [expr $ndx + [expr $len - 1] ] ]
+ set data $prefix:$k
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good \
+ get_both_data:$k $ret [list [list $k $data]]
+ }
+ }
+
+ $db sync
+
+ # Now repeat the above test using cursor ops
+ puts "\tTest$tnum.c: Checking file for correct duplicates (cursor)"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+
+ for {set ndx 0} {$ndx < [expr $len * $ndups]} {incr ndx $len} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set prefix [string range $d $ndx \
+ [expr $ndx + [ expr $len - 1]]]
+ set data $prefix:$k
+ set ret [eval {$dbc get} {-get_both $k $data}]
+ error_check_good \
+ curs_get_both_data:$k $ret [list [list $k $data]]
+
+ set ret [eval {$dbc get} {-get_both_range $k $prefix}]
+ error_check_good \
+ curs_get_both_range:$k $ret [list [list $k $data]]
+ }
+ }
+
+ # Now check the error case
+ puts "\tTest$tnum.d: Check error case (no cursor)"
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set data XXX$k
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+
+ # Now check the error case
+ puts "\tTest$tnum.e: Check error case (cursor)"
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set data XXX$k
+ set ret [eval {$dbc get} {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
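# A minimal standalone sketch (not part of the patch) of the two lookups
# test032 exercises, assuming the Berkeley DB Tcl package is loaded:
# -get_both needs an exact key/data match, while -get_both_range (cursor
# only) returns the smallest duplicate that sorts at or after the given data.
set db [berkdb_open -create -dup -dupsort -btree sketch032.db]
foreach d {aa:x bb:x cc:x} { $db put k1 $d }
puts [$db get -get_both k1 bb:x]        ;# exact pair found
set dbc [$db cursor]
puts [$dbc get -get_both_range k1 bb]   ;# should land on "bb:x"
puts [$db get -get_both k1 zz:x]        ;# no such pair: empty list
$dbc close
$db close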
diff --git a/db-4.8.30/test/test033.tcl b/db-4.8.30/test/test033.tcl
new file mode 100644
index 0000000..46a4e93
--- /dev/null
+++ b/db-4.8.30/test/test033.tcl
@@ -0,0 +1,181 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test033
+# TEST DB_GET_BOTH without comparison function
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and data; add duplicate records for each. After all are
+# TEST entered, retrieve all and verify output using DB_GET_BOTH (on DB and
+# TEST DBC handles) and DB_GET_BOTH_RANGE (on a DBC handle) on existent and
+# TEST nonexistent keys.
+# TEST
+# TEST XXX
+# TEST This does not work for rbtree.
+proc test033 { method {nentries 10000} {ndups 5} {tnum "033"} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_rbtree $method] == 1 } {
+ puts "Test$tnum skipping for method $method"
+ return
+ }
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test$tnum skipping for btree with compression."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test$tnum: $method ($args) $nentries small $ndups dup key/data pairs"
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ # Duplicate data entries are not allowed in record based methods.
+ if { [is_record_based $method] == 1 } {
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod} $args {$testfile}]
+ } else {
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup} $args {$testfile}]
+ }
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ # Allocate a cursor for DB_GET_BOTH_RANGE.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest$tnum.a: Put/get loop."
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ } else {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good db_put $ret 0
+ }
+ }
+
+ # Now retrieve all the keys matching this key and dup
+ # for non-record based AMs.
+ if { [is_record_based $method] == 1 } {
+ test033_recno.check $db $dbc $method $str $txn $key
+ } else {
+ test033_check $db $dbc $method $str $txn $ndups
+ }
+ incr count
+ }
+
+ close $did
+
+ puts "\tTest$tnum.b: Verifying DB_GET_BOTH after creation."
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Now retrieve all the keys matching this key
+ # for non-record based AMs.
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ test033_recno.check $db $dbc $method $str $txn $key
+ } else {
+ test033_check $db $dbc $method $str $txn $ndups
+ }
+ incr count
+ }
+ close $did
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# No testing of dups is done on record-based methods.
+proc test033_recno.check {db dbc method str txn key} {
+ set ret [eval {$db get} $txn {-recno $key}]
+ error_check_good "db_get:$method" \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+ set ret [$dbc get -get_both $key [pad_data $method $str]]
+ error_check_good "db_get_both:$method" \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+}
+
+# Testing of non-record-based methods includes duplicates
+# and get_both_range.
+proc test033_check {db dbc method str txn ndups} {
+ for {set i 1} {$i <= $ndups } { incr i } {
+ set datastr $i:$str
+
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good "db_get_both:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+
+ set ret [$dbc get -get_both $str $datastr]
+ error_check_good "dbc_get_both:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+
+ set ret [$dbc get -get_both_range $str $datastr]
+ error_check_good "dbc_get_both_range:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+ }
+
+ # Now retrieve non-existent dup (i is ndups + 1)
+ set datastr $i:$str
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good db_get_both:dupfailure [llength $ret] 0
+ set ret [$dbc get -get_both $str $datastr]
+ error_check_good dbc_get_both:dupfailure [llength $ret] 0
+ set ret [$dbc get -get_both_range $str $datastr]
+ error_check_good dbc_get_both_range [llength $ret] 0
+}
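# A rough standalone sketch (outside the patch) of -get_both on unsorted
# duplicates, the case test033 covers, assuming the Berkeley DB Tcl package
# is loaded: exact-match lookups work even when the database was opened with
# -dup but without -dupsort.
set db [berkdb_open -create -dup -btree sketch033.db]
$db put word 1:word
$db put word 2:word
set dbc [$db cursor]
puts [$dbc get -get_both word 2:word]   ;# exact pair found
puts [$db get -get_both word 3:word]    ;# nonexistent dup: empty list
$dbc close
$db close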
diff --git a/db-4.8.30/test/test034.tcl b/db-4.8.30/test/test034.tcl
new file mode 100644
index 0000000..28e5dfd
--- /dev/null
+++ b/db-4.8.30/test/test034.tcl
@@ -0,0 +1,35 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1998-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test034
+# TEST Test032 with off-page or overflow cases, with both non-duplicates
+# TEST and duplicates.
+# TEST
+# TEST DB_GET_BOTH and DB_GET_BOTH_RANGE functionality with off-page
+# TEST or overflow cases, for both non-duplicates and duplicates.
+proc test034 { method {nentries 10000} args} {
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test034: Skipping for specific pagesizes"
+ return
+ }
+
+ # Test without duplicates and without overflow.
+ eval {test032 $method $nentries 1 "034" 0} $args
+
+ # Test without duplicates but with overflow.
+ eval {test032 $method [expr $nentries / 100] 1 "034" 1} $args
+
+ # Test with off-page duplicates
+ eval {test032 $method $nentries 20 "034" 0 -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test032 $method [expr $nentries / 10] 100 "034" 0 -pagesize 512} \
+ $args
+
+ # Test with overflow duplicate.
+ eval {test032 $method [expr $nentries / 100] 5 "034" 1} $args
+}
diff --git a/db-4.8.30/test/test035.tcl b/db-4.8.30/test/test035.tcl
new file mode 100644
index 0000000..51e3c80
--- /dev/null
+++ b/db-4.8.30/test/test035.tcl
@@ -0,0 +1,21 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test035
+# TEST Test033 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
+proc test035 { method {nentries 10000} args} {
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test035: Skipping for specific pagesizes"
+ return
+ }
+ # Test with off-page duplicates
+ eval {test033 $method $nentries 20 "035" -pagesize 512} $args
+ # Test with multiple pages of off-page duplicates
+ eval {test033 $method [expr $nentries / 10] 100 "035" -pagesize 512} \
+ $args
+}
diff --git a/db-4.8.30/test/test036.tcl b/db-4.8.30/test/test036.tcl
new file mode 100644
index 0000000..d85a52e
--- /dev/null
+++ b/db-4.8.30/test/test036.tcl
@@ -0,0 +1,172 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test036
+# TEST Test KEYFIRST and KEYLAST when the key doesn't exist
+# TEST Put nentries key/data pairs (from the dictionary) using a cursor
+# TEST and KEYFIRST and KEYLAST (this tests the case where we use cursor
+# TEST put for non-existent keys).
+proc test036 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_record_based $method] == 1 } {
+ puts "Test036 skipping for method recno"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test036.db
+ set env NULL
+ } else {
+ set testfile test036.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test036: $method ($args) $nentries equal key/data pairs"
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test036_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test036.check
+ }
+ puts "\tTest036.a: put/get loop KEYFIRST"
+ # Here is the loop where we put and get each key/data pair
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval {$dbc put} $pflags {-keyfirst $key $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get [lindex [lindex $ret 0] 1] $str
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest036.b: put/get loop KEYLAST"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval {$dbc put} $pflags {-keylast $key $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get [lindex [lindex $ret 0] 1] $str
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest036.c: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+}
+
+# Check function for test036; keys and data are identical
+proc test036.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc test036_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
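# A small standalone sketch (not part of the patch) of the -keyfirst and
# -keylast cursor puts that test036 relies on, assuming the Berkeley DB Tcl
# package is loaded: both flags are accepted even when the key does not yet
# exist, which is exactly the case the test drives.
set db [berkdb_open -create -btree sketch036.db]
set dbc [$db cursor]
$dbc put -keyfirst apple red    ;# cursor put for a brand-new key
$dbc put -keylast berry blue
puts [$db get apple]            ;# should return the pair apple/red
$dbc close
$db close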
diff --git a/db-4.8.30/test/test037.tcl b/db-4.8.30/test/test037.tcl
new file mode 100644
index 0000000..0cccb96
--- /dev/null
+++ b/db-4.8.30/test/test037.tcl
@@ -0,0 +1,199 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test037
+# TEST Test DB_RMW
+proc test037 { method {nentries 100} args } {
+ global encrypt
+
+ source ./include.tcl
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test037 skipping for env $env"
+ return
+ }
+
+ puts "Test037: RMW $method"
+
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+ set pageargs ""
+ split_pageargs $args pageargs
+
+ # Create the database
+ env_cleanup $testdir
+ set testfile test037.db
+
+ set local_env \
+ [eval {berkdb_env -create -mode 0644 -txn} \
+ $encargs $pageargs -home $testdir]
+ error_check_good dbenv [is_valid_env $local_env] TRUE
+
+ set db [eval {berkdb_open -env $local_env \
+ -create -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest037.a: Creating database"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+ incr count
+ }
+ close $did
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$local_env close] 0
+
+ puts "\tTest037.b: Setting up environments"
+
+ # Open local environment
+ set env_cmd \
+ [concat berkdb_env -create -txn $encargs $pageargs -home $testdir]
+ set local_env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $local_env] TRUE
+
+ # Open local transaction
+ set local_txn [$local_env txn]
+ error_check_good txn_open [is_valid_txn $local_txn $local_env] TRUE
+
+ # Open remote environment
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Open remote transaction
+ set remote_txn [send_cmd $f1 "$remote_env txn"]
+ error_check_good \
+ remote:txn_open [is_valid_txn $remote_txn $remote_env] TRUE
+
+ # Now try put test without RMW. Gets on one site should not
+ # lock out gets on another.
+
+ # Open databases and dictionary
+ puts "\tTest037.c: Opening databases"
+ set did [open $dict]
+ set rkey 0
+
+ set db [eval {berkdb_open -auto_commit -env $local_env } $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set rdb [send_cmd $f1 \
+ "berkdb_open -auto_commit -env $remote_env $args -mode 0644 $testfile"]
+ error_check_good remote:dbopen [is_valid_db $rdb] TRUE
+
+ puts "\tTest037.d: Testing without RMW"
+
+ # Now, get a key and try to "get" it from both DBs.
+ error_check_bad "gets on new open" [gets $did str] -1
+ incr rkey
+ if { [is_record_based $method] == 1 } {
+ set key $rkey
+ } else {
+ set key $str
+ }
+
+ set rec [eval {$db get -txn $local_txn} $gflags {$key}]
+ error_check_good local_get [lindex [lindex $rec 0] 1] \
+ [pad_data $method $str]
+
+ set r [send_timed_cmd $f1 0 "$rdb get -txn $remote_txn $gflags $key"]
+ error_check_good remote_send $r 0
+
+ # Now sleep before releasing local record lock
+ tclsleep 5
+ error_check_good local_commit [$local_txn commit] 0
+
+ # Now get the remote result
+ set remote_time [rcv_result $f1]
+ error_check_good no_rmw_get:remote_time [expr $remote_time <= 1] 1
+
+ # Commit the remote
+ set r [send_cmd $f1 "$remote_txn commit"]
+ error_check_good remote_commit $r 0
+
+ puts "\tTest037.e: Testing with RMW"
+
+ # Open local transaction
+ set local_txn [$local_env txn]
+ error_check_good \
+ txn_open [is_valid_txn $local_txn $local_env] TRUE
+
+ # Open remote transaction
+ set remote_txn [send_cmd $f1 "$remote_env txn"]
+ error_check_good remote:txn_open \
+ [is_valid_txn $remote_txn $remote_env] TRUE
+
+ # Now, get a key and try to "get" it from both DBs.
+ error_check_bad "gets on new open" [gets $did str] -1
+ incr rkey
+ if { [is_record_based $method] == 1 } {
+ set key $rkey
+ } else {
+ set key $str
+ }
+
+ set rec [eval {$db get -txn $local_txn -rmw} $gflags {$key}]
+ error_check_good \
+ local_get [lindex [lindex $rec 0] 1] [pad_data $method $str]
+
+ set r [send_timed_cmd $f1 0 "$rdb get -txn $remote_txn $gflags $key"]
+ error_check_good remote_send $r 0
+
+ # Now sleep before releasing local record lock
+ tclsleep 5
+ error_check_good local_commit [$local_txn commit] 0
+
+ # Now get the remote result
+ set remote_time [rcv_result $f1]
+ error_check_good rmw_get:remote_time [expr $remote_time > 4] 1
+
+ # Commit the remote
+ set r [send_cmd $f1 "$remote_txn commit"]
+ error_check_good remote_commit $r 0
+
+ # Close everything up: remote first
+ set r [send_cmd $f1 "$rdb close"]
+ error_check_good remote_db_close $r 0
+
+ set r [send_cmd $f1 "$remote_env close"]
+
+ # Close locally
+ error_check_good db_close [$db close] 0
+ $local_env close
+ close $did
+ close $f1
+}
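# A hedged standalone sketch (outside the patch) of a read-modify-write get,
# the locking behavior test037 times across two processes; it assumes the
# Berkeley DB Tcl package is loaded and that the (hypothetical) directory
# ./rmwenv already exists. The -rmw flag takes a write lock at read time, so
# a concurrent reader of the same key blocks until this transaction commits.
set env [berkdb_env -create -txn -home ./rmwenv]
set db [berkdb_open -create -auto_commit -env $env -btree rmw.db]
set t0 [$env txn]
$db put -txn $t0 counter 1
$t0 commit
set t [$env txn]
set val [lindex [lindex [$db get -txn $t -rmw counter] 0] 1]
$db put -txn $t counter [expr $val + 1]
$t commit
$db close
$env close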
diff --git a/db-4.8.30/test/test038.tcl b/db-4.8.30/test/test038.tcl
new file mode 100644
index 0000000..0e10281
--- /dev/null
+++ b/db-4.8.30/test/test038.tcl
@@ -0,0 +1,232 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test038
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE on deleted items
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test
+# TEST
+# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+# TEST each dup in the file explicitly. Then remove each duplicate and try
+# TEST the retrieval again.
+proc test038 { method {nentries 10000} {ndups 5} {tnum "038"} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set checkargs [split_partition_args $args]
+
+ # The checkdb is of type hash so it can't use compression.
+ set checkargs [strip_compression_args $checkargs]
+
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test$tnum skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append checkargs " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test$tnum: \
+ $method ($args) $nentries small sorted dup key/data pairs"
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup -dupsort} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval {berkdb_open \
+ -create -mode 0644 -hash} $checkargs {$checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set pref $pref[string \
+ index $alphabet [berkdb random_int 0 25]]
+ while { [string first $pref $dups] != -1 } {
+ set pref [string toupper $pref]
+ if { [string first $pref $dups] != -1 } {
+ set pref [string index $alphabet \
+ [berkdb random_int 0 25]]
+ set pref $pref[string index $alphabet \
+ [berkdb random_int 0 25]]
+ }
+ }
+ if { [string length $dups] == 0 } {
+ set dups $pref
+ } else {
+ set dups "$dups $pref"
+ }
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare $lastdup $datastr] > 0} {
+ error_check_good sorted_dups($lastdup,$datastr)\
+ 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+ error_check_good "Test$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now check the duplicates, then delete them and recheck
+ puts "\tTest$tnum.b: Checking and Deleting duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE
+
+ for {set ndx 0} {$ndx < $ndups} {incr ndx} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set nn [expr $ndx * 3]
+ set pref [string range $d $nn [expr $nn + 1]]
+ set data $pref:$k
+ set ret [$dbc get -get_both $k $data]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc get -get_both_range $k $pref]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc del]
+ error_check_good del $ret 0
+
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+
+ # We should either not find anything (if deleting the
+ # largest duplicate in the set) or a duplicate that
+ # sorts larger than the one we deleted.
+ set ret [$dbc get -get_both_range $k $pref]
+ if { [llength $ret] != 0 } {
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string compare \
+ $pref [lindex [lindex $ret 0] 1]] >= 0} {
+ error_check_good \
+ error_case_range:sorted_dups($pref,$datastr) 0 1
+ }
+ }
+
+ if {$ndx != 0} {
+ set n [expr ($ndx - 1) * 3]
+ set pref [string range $d $n [expr $n + 1]]
+ set data $pref:$k
+ set ret \
+ [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+ }
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
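+
+# A minimal sketch (not called by the tests) of the two cursor lookups the
+# loop above exercises against a sorted-duplicate database; the proc name and
+# arguments are illustrative only.  -get_both requires an exact key/data
+# match, while -get_both_range matches the key and positions on the first
+# duplicate whose data sorts greater than or equal to the supplied value.
+proc getboth_sorted_sketch { db key data prefix } {
+	set dbc [$db cursor]
+	error_check_good sketch_cursor [is_valid_cursor $dbc $db] TRUE
+	# Exact match on both key and data.
+	set exact [$dbc get -get_both $key $data]
+	# Smallest duplicate of $key sorting >= $prefix (empty list if none).
+	set range [$dbc get -get_both_range $key $prefix]
+	error_check_good sketch_close [$dbc close] 0
+	return [list $exact $range]
+}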
diff --git a/db-4.8.30/test/test039.tcl b/db-4.8.30/test/test039.tcl
new file mode 100644
index 0000000..b1283ac
--- /dev/null
+++ b/db-4.8.30/test/test039.tcl
@@ -0,0 +1,217 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test039
+# TEST DB_GET_BOTH/DB_GET_BOTH_RANGE on deleted items without comparison
+# TEST function.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test.
+# TEST
+# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+# TEST each dup in the file explicitly. Then remove each duplicate and try
+# TEST the retrieval again.
+proc test039 { method {nentries 10000} {ndups 5} {tnum "039"} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set checkargs [split_partition_args $args]
+ set omethod [convert_method $method]
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test$tnum skipping for btree with compression."
+ return
+ }
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test$tnum skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append checkargs " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test$tnum: $method $nentries \
+ small $ndups unsorted dup key/data pairs"
+
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval \
+ {berkdb_open -create -mode 0644 -hash} $checkargs {$checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set pref $pref[string \
+ index $alphabet [berkdb random_int 0 25]]
+ while { [string first $pref $dups] != -1 } {
+ set pref [string toupper $pref]
+ if { [string first $pref $dups] != -1 } {
+ set pref [string index $alphabet \
+ [berkdb random_int 0 25]]
+ set pref $pref[string index $alphabet \
+ [berkdb random_int 0 25]]
+ }
+ }
+ if { [string length $dups] == 0 } {
+ set dups $pref
+ } else {
+ set dups "$dups $pref"
+ }
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
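+			# Prefixes were appended to $dups as "XX " (two
+			# characters plus a space), so the x'th expected
+			# prefix starts at offset 3 * x.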
+ set xx [expr $x * 3]
+ set check_data \
+ [string range $dups $xx [expr $xx + 1]]:$k
+ error_check_good retrieve $datastr $check_data
+ incr x
+ }
+ error_check_good "Test$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+	# Now check the duplicates, then delete them and recheck.
+ puts "\tTest$tnum.b: Checking and Deleting duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE
+
+ for {set ndx 0} {$ndx < $ndups} {incr ndx} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set nn [expr $ndx * 3]
+ set pref [string range $d $nn [expr $nn + 1]]
+ set data $pref:$k
+ set ret [$dbc get -get_both $k $data]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc del]
+ error_check_good del $ret 0
+
+ set ret [$dbc get -get_both $k $data]
+ error_check_good get_both:$k [llength $ret] 0
+
+ set ret [$dbc get -get_both_range $k $data]
+ error_check_good get_both_range:$k [llength $ret] 0
+
+ if {$ndx != 0} {
+ set n [expr ($ndx - 1) * 3]
+ set pref [string range $d $n [expr $n + 1]]
+ set data $pref:$k
+ set ret [$dbc get -get_both $k $data]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+ }
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test040.tcl b/db-4.8.30/test/test040.tcl
new file mode 100644
index 0000000..d23ff34
--- /dev/null
+++ b/db-4.8.30/test/test040.tcl
@@ -0,0 +1,22 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1998-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test040
+# TEST Test038 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
+proc test040 { method {nentries 10000} args} {
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test040: skipping for specific pagesizes"
+ return
+ }
+ # Test with off-page duplicates
+ eval {test038 $method $nentries 20 "040" -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test038 $method [expr $nentries / 10] 100 "040" -pagesize 512} \
+ $args
+}
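+
+# The small page size is what makes these runs "off-page": with a 512-byte
+# page and 20 (or 100) duplicates per key, the duplicate sets no longer fit
+# on a leaf page and are moved to off-page duplicate pages.  A hypothetical
+# direct open with the same effect (file name illustrative only):
+#
+#	set db [berkdb_open -create -btree -dup -pagesize 512 offpage.db]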
diff --git a/db-4.8.30/test/test041.tcl b/db-4.8.30/test/test041.tcl
new file mode 100644
index 0000000..65f7c2a
--- /dev/null
+++ b/db-4.8.30/test/test041.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test041
+# TEST Test039 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
+proc test041 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test039 $method $nentries 20 "041" -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test039 $method [expr $nentries / 10] 100 "041" -pagesize 512} \
+ $args
+}
diff --git a/db-4.8.30/test/test042.tcl b/db-4.8.30/test/test042.tcl
new file mode 100644
index 0000000..41c308c
--- /dev/null
+++ b/db-4.8.30/test/test042.tcl
@@ -0,0 +1,186 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test042
+# TEST Concurrent Data Store test (CDB)
+# TEST
+# TEST Multiprocess DB test; verify that locking is working for the
+# TEST concurrent access method product.
+# TEST
+# TEST Use the first "nentries" words from the dictionary. Insert each with
+# TEST self as key and a fixed, medium length data string. Then fire off
+# TEST multiple processes that bang on the database. Each one should try to
+# TEST read and write random keys. When they rewrite, they'll append their
+# TEST pid to the data string (sometimes doing a rewrite sometimes doing a
+# TEST partial put). Some will use cursors to traverse through a few keys
+# TEST before finding one to write.
+
+proc test042 { method {nentries 1000} args } {
+ global encrypt
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test042 skipping for env $env"
+ return
+ }
+
+ set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test042 skipping for security"
+ return
+ }
+ test042_body $method $nentries 0 $args
+ test042_body $method $nentries 1 $args
+}
+
+proc test042_body { method nentries alldb args } {
+ source ./include.tcl
+
+ if { $alldb } {
+ set eflag "-cdb -cdb_alldb"
+ } else {
+ set eflag "-cdb"
+ }
+ puts "Test042: CDB Test ($eflag) $method $nentries"
+
+ # Set initial parameters
+ set do_exit 0
+ set iter 10000
+ set procs 5
+
+ # Process arguments
+ set oargs ""
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -dir { incr i; set testdir [lindex $args $i] }
+ -iter { incr i; set iter [lindex $args $i] }
+ -procs { incr i; set procs [lindex $args $i] }
+ -exit { set do_exit 1 }
+ default { append oargs " " [lindex $args $i] }
+ }
+ }
+
+ set pageargs ""
+ set args [split_pageargs $args pageargs]
+
+ # Create the database and open the dictionary
+ set basename test042
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create} $eflag $pageargs -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # Env is created, now set up database
+ test042_dbinit $env $nentries $method $oargs $basename.0.db
+ if { $alldb } {
+ for { set i 1 } {$i < $procs} {incr i} {
+ test042_dbinit $env $nentries $method $oargs \
+ $basename.$i.db
+ }
+ }
+
+ # Remove old mpools and Open/create the lock and mpool regions
+ error_check_good env:close:$env [$env close] 0
+ set ret [berkdb envremove -home $testdir]
+ error_check_good env_remove $ret 0
+
+ set env [eval {berkdb_env \
+ -create -cachesize {0 1048576 1}} $pageargs $eflag -home $testdir]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ if { $do_exit == 1 } {
+ return
+ }
+
+ # Now spawn off processes
+ berkdb debug_check
+ puts "\tTest042.b: forking off $procs children"
+ set pidlist {}
+
+ for { set i 0 } {$i < $procs} {incr i} {
+ if { $alldb } {
+ set tf $basename.$i.db
+ } else {
+ set tf $basename.0.db
+ }
+ puts "exec $tclsh_path $test_path/wrap.tcl \
+ mdbscript.tcl $testdir/test042.$i.log \
+ $method $testdir $tf $nentries $iter $i $procs $args &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mdbscript.tcl $testdir/test042.$i.log $method \
+ $testdir $tf $nentries $iter $i $procs $args &]
+ lappend pidlist $p
+ }
+ puts "Test042: $procs independent processes now running"
+ watch_procs $pidlist
+
+ # Make sure we haven't added or lost any entries.
+ set dblist [glob $testdir/$basename.*.db]
+ foreach file $dblist {
+ set tf [file tail $file]
+ set db [eval {berkdb_open -env $env} $oargs $tf]
+ set statret [$db stat]
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Number of records}] == 0 } {
+ set numrecs [lindex $pair 1]
+ break
+ }
+ }
+ error_check_good nentries $numrecs $nentries
+ error_check_good db_close [$db close] 0
+ }
+
+ # Check for test failure
+ set errstrings [eval findfail [glob $testdir/test042.*.log]]
+ foreach str $errstrings {
+ puts "FAIL: error message in log file: $str"
+ }
+
+ # Test is done, blow away lock and mpool region
+ reset_env $env
+}
+
+proc test042_dbinit { env nentries method oargs tf } {
+ global datastr
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs $tf]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest042.a: put loop $tf"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+}
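+
+# A minimal sketch (not called by the test) of the two Concurrent Data Store
+# configurations exercised above; the proc name is illustrative only.  Plain
+# -cdb locks on a per-database basis, while adding -cdb_alldb makes every
+# database in the environment share a single lock.
+proc cdb_env_sketch { dir alldb } {
+	if { $alldb } {
+		set eflag "-cdb -cdb_alldb"
+	} else {
+		set eflag "-cdb"
+	}
+	set env [eval {berkdb_env -create} $eflag -home $dir]
+	error_check_good sketch_env [is_valid_env $env] TRUE
+	return $env
+}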
diff --git a/db-4.8.30/test/test043.tcl b/db-4.8.30/test/test043.tcl
new file mode 100644
index 0000000..2f141c3
--- /dev/null
+++ b/db-4.8.30/test/test043.tcl
@@ -0,0 +1,191 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test043
+# TEST Recno renumbering and implicit creation test
+# TEST Test the Record number implicit creation and renumbering options.
+proc test043 { method {nentries 10000} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test043: $method ($args)"
+
+ if { [is_record_based $method] != 1 } {
+ puts "Test043 skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test043.db
+ set env NULL
+ } else {
+ set testfile test043.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Create the database
+ set db [eval {berkdb_open -create -mode 0644} $args \
+ {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags " -recno"
+ set txn ""
+
+ # First test implicit creation and retrieval
+ set count 1
+ set interval 5
+ if { $nentries < $interval } {
+ set nentries [expr $interval + 1]
+ }
+ puts "\tTest043.a: insert keys at $interval record intervals"
+ while { $count <= $nentries } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$count [chop_data $method $count]}]
+ error_check_good "$db put $count" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ set last $count
+ incr count $interval
+ }
+
+ puts "\tTest043.b: get keys using DB_FIRST/DB_NEXT"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good "$db cursor" [is_valid_cursor $dbc $db] TRUE
+
+ set check 1
+ for { set rec [$dbc get -first] } { [llength $rec] != 0 } {
+ set rec [$dbc get -next] } {
+ set k [lindex [lindex $rec 0] 0]
+ set d [pad_data $method [lindex [lindex $rec 0] 1]]
+ error_check_good "$dbc get key==data" [pad_data $method $k] $d
+ error_check_good "$dbc get sequential" $k $check
+ if { $k > $nentries } {
+ error_check_good "$dbc get key too large" $k $nentries
+ }
+ incr check $interval
+ }
+
+ # Now make sure that we get DB_KEYEMPTY for non-existent keys
+ puts "\tTest043.c: Retrieve non-existent keys"
+ global errorInfo
+
+ set check 1
+ for { set rec [$dbc get -first] } { [llength $rec] != 0 } {
+ set rec [$dbc get -next] } {
+ set k [lindex [lindex $rec 0] 0]
+
+ set ret [eval {$db get} $txn $gflags {[expr $k + 1]}]
+ error_check_good "$db \
+ get [expr $k + 1]" $ret [list]
+
+ incr check $interval
+ # Make sure we don't do a retrieve past the end of file
+ if { $check >= $last } {
+ break
+ }
+ }
+
+ # Now try deleting and make sure the right thing happens.
+ puts "\tTest043.d: Delete tests"
+ set rec [$dbc get -first]
+ error_check_bad "$dbc get -first" [llength $rec] 0
+ error_check_good "$dbc get -first key" [lindex [lindex $rec 0] 0] 1
+ error_check_good "$dbc get -first data" \
+ [lindex [lindex $rec 0] 1] [pad_data $method 1]
+
+ # Delete the first item
+ error_check_good "$dbc del" [$dbc del] 0
+
+ # Retrieving 1 should always fail
+ set ret [eval {$db get} $txn $gflags {1}]
+ error_check_good "$db get 1" $ret [list]
+
+ # Now, retrieving other keys should work; keys will vary depending
+ # upon renumbering.
+ if { $do_renumber == 1 } {
+ set count [expr 0 + $interval]
+ set max [expr $nentries - 1]
+ } else {
+ set count [expr 1 + $interval]
+ set max $nentries
+ }
+
+ while { $count <= $max } {
+ set rec [eval {$db get} $txn $gflags {$count}]
+ if { $do_renumber == 1 } {
+ set data [expr $count + 1]
+ } else {
+ set data $count
+ }
+ error_check_good "$db get $count" \
+ [pad_data $method $data] [lindex [lindex $rec 0] 1]
+ incr count $interval
+ }
+ set max [expr $count - $interval]
+
+ puts "\tTest043.e: Verify LAST/PREV functionality"
+ set count $max
+ for { set rec [$dbc get -last] } { [llength $rec] != 0 } {
+ set rec [$dbc get -prev] } {
+ set k [lindex [lindex $rec 0] 0]
+ set d [lindex [lindex $rec 0] 1]
+ if { $do_renumber == 1 } {
+ set data [expr $k + 1]
+ } else {
+ set data $k
+ }
+ error_check_good \
+ "$dbc get key==data" [pad_data $method $data] $d
+ error_check_good "$dbc get sequential" $k $count
+ if { $k > $nentries } {
+ error_check_good "$dbc get key too large" $k $nentries
+ }
+ set count [expr $count - $interval]
+ if { $count < 1 } {
+ break
+ }
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
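+
+# A minimal sketch (not called by the test) of the recno behavior checked
+# above; the proc name and data are illustrative only.  Writing a record
+# number past the end of the file implicitly creates the intervening records,
+# a get on a record that was never written returns an empty list, and with
+# renumbering recno a delete shifts later record numbers down by one.
+proc recno_gap_sketch { db } {
+	# Records 1 and 6 exist after these puts; 2 through 5 are implicit.
+	error_check_good sketch_put1 [$db put 1 data1] 0
+	error_check_good sketch_put6 [$db put 6 data6] 0
+	# A never-written record number comes back empty (DB_KEYEMPTY).
+	error_check_good sketch_gap [$db get -recno 3] [list]
+}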
diff --git a/db-4.8.30/test/test044.tcl b/db-4.8.30/test/test044.tcl
new file mode 100644
index 0000000..fbe09f6
--- /dev/null
+++ b/db-4.8.30/test/test044.tcl
@@ -0,0 +1,262 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test044
+# TEST Small system integration tests
+# TEST Test proper functioning of the checkpoint daemon,
+# TEST recovery, transactions, etc.
+# TEST
+# TEST System integration DB test: verify that locking, recovery, checkpoint,
+# TEST and all the other utilities basically work.
+# TEST
+# TEST The test consists of $nprocs processes operating on $nfiles files. A
+# TEST transaction consists of adding the same key/data pair to some random
+# TEST number of these files. We generate a bimodal distribution in key size
+# TEST with 70% of the keys being small (1-10 characters) and the remaining
+# TEST 30% of the keys being large (uniform distribution about mean $key_avg).
+# TEST If we generate a key, we first check to make sure that the key is not
+# TEST already in the dataset. If it is, we do a lookup.
+#
+# XXX
+# This test uses grow-only files currently!
+proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
+ source ./include.tcl
+ global encrypt
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set pageargs ""
+ split_pageargs $args pageargs
+
+ berkdb srand $rand_init
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test044 skipping for env $env"
+ return
+ }
+ if { $encrypt != 0 } {
+ puts "Test044 skipping for security"
+ return
+ }
+
+ puts "Test044: system integration test db $method $nprocs processes \
+ on $nfiles files"
+
+ # Parse options
+ set otherargs ""
+ set key_avg 10
+ set data_avg 20
+ set do_exit 0
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -key_avg { incr i; set key_avg [lindex $args $i] }
+ -data_avg { incr i; set data_avg [lindex $args $i] }
+ -testdir { incr i; set testdir [lindex $args $i] }
+ -x.* { set do_exit 1 }
+ default {
+ lappend otherargs [lindex $args $i]
+ }
+ }
+ }
+
+ if { $cont == 0 } {
+ # Create the database and open the dictionary
+ env_cleanup $testdir
+
+ # Create an environment. Bump up the log region because
+ # we will create lots of files. This is especially
+ # needed when we test partitioned databases.
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_lg_regionmax 200000"
+ close $cid
+
+ puts "\tTest044.a: creating environment and $nfiles files"
+ set dbenv \
+ [eval {berkdb_env -create -txn} $pageargs -home $testdir]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Create a bunch of files
+ set m $method
+
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ if { $method == "all" } {
+ switch [berkdb random_int 1 2] {
+ 1 { set m -btree }
+ 2 { set m -hash }
+ }
+ } else {
+ set m $omethod
+ }
+
+ set db [eval {berkdb_open -env $dbenv -create \
+ -mode 0644 $m} $otherargs {test044.$i.db}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+ }
+ }
+
+ # Close the environment
+ $dbenv close
+
+ if { $do_exit == 1 } {
+ return
+ }
+
+ # Database is created, now fork off the kids.
+ puts "\tTest044.b: forking off $nprocs processes and utilities"
+ set cycle 1
+ set ncycles 3
+ while { $cycle <= $ncycles } {
+ set dbenv \
+ [eval {berkdb_env -create -txn} $pageargs -home $testdir]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Fire off deadlock detector and checkpointer
+ puts "Beginning cycle $cycle"
+ set ddpid [exec $util_path/db_deadlock -h $testdir -t 5 &]
+ set cppid [exec $util_path/db_checkpoint -h $testdir -p 2 &]
+ puts "Deadlock detector: $ddpid Checkpoint daemon $cppid"
+
+ set pidlist {}
+ for { set i 0 } {$i < $nprocs} {incr i} {
+ set p [exec $tclsh_path \
+ $test_path/sysscript.tcl $testdir \
+ $nfiles $key_avg $data_avg $omethod $args\
+ >& $testdir/test044.$i.log &]
+ lappend pidlist $p
+ }
+ set sleep [berkdb random_int 300 600]
+ puts \
+"[timestamp] $nprocs processes running $pidlist for $sleep seconds"
+ tclsleep $sleep
+
+ # Now simulate a crash
+ puts "[timestamp] Crashing"
+
+ #
+ # The environment must remain open until this point to get
+ # proper sharing (using the paging file) on Win/9X. [#2342]
+ #
+ error_check_good env_close [$dbenv close] 0
+
+ tclkill $ddpid
+ tclkill $cppid
+
+ foreach p $pidlist {
+ tclkill $p
+ }
+
+ # Check for test failure
+ set errstrings [eval findfail [glob $testdir/test044.*.log]]
+ foreach str $errstrings {
+ puts "FAIL: error message in log file: $str"
+ }
+
+ # Now run recovery
+ eval test044_verify $testdir $nfiles $otherargs
+ incr cycle
+ }
+}
+
+proc test044_usage { } {
+	puts -nonewline "test044 method nprocs nfiles cont \[-key_avg n]"
+	puts " \[-data_avg n] \[-testdir directory] -x"
+}
+
+proc test044_verify { dir nfiles args} {
+ source ./include.tcl
+
+ # Save everything away in case something breaks
+# for { set f 0 } { $f < $nfiles } {incr f} {
+# file copy -force $dir/test044.$f.db $dir/test044.$f.save1
+# }
+# foreach f [glob $dir/log.*] {
+# if { [is_substr $f save] == 0 } {
+# file copy -force $f $f.save1
+# }
+# }
+
+ # Run recovery and then read through all the database files to make
+ # sure that they all look good.
+
+ puts "\tTest044.verify: Running recovery and verifying file contents"
+ set stat [catch {exec $util_path/db_recover -h $dir} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ # Save everything away in case something breaks
+# for { set f 0 } { $f < $nfiles } {incr f} {
+# file copy -force $dir/test044.$f.db $dir/test044.$f.save2
+# }
+# foreach f [glob $dir/log.*] {
+# if { [is_substr $f save] == 0 } {
+# file copy -force $f $f.save2
+# }
+# }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ set db($f) [eval {berkdb_open} $args {$dir/test044.$f.db}]
+ error_check_good $f:dbopen [is_valid_db $db($f)] TRUE
+
+ set cursors($f) [$db($f) cursor]
+ error_check_bad $f:cursor_open $cursors($f) NULL
+ error_check_good \
+ $f:cursor_open [is_substr $cursors($f) $db($f)] 1
+ }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ for {set d [$cursors($f) get -first] } \
+ { [string length $d] != 0 } \
+ { set d [$cursors($f) get -next] } {
+
+ set k [lindex [lindex $d 0] 0]
+ set d [lindex [lindex $d 0] 1]
+
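+			# The data value is expected to be a sequence of
+			# "filenum:" segments, one per file the writing
+			# transaction touched; walk those segments and verify
+			# the same key/data pair is present in each such file.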
+ set flist [zero_list $nfiles]
+ set r $d
+ while { [set ndx [string first : $r]] != -1 } {
+ set fnum [string range $r 0 [expr $ndx - 1]]
+ if { [lindex $flist $fnum] == 0 } {
+ set fl "-set"
+ } else {
+ set fl "-next"
+ }
+
+ if { $fl != "-set" || $fnum != $f } {
+ if { [string compare $fl "-set"] == 0} {
+ set full [$cursors($fnum) \
+ get -set $k]
+ } else {
+ set full [$cursors($fnum) \
+ get -next]
+ }
+ set key [lindex [lindex $full 0] 0]
+ set rec [lindex [lindex $full 0] 1]
+ error_check_good \
+ $f:dbget_$fnum:key $key $k
+ error_check_good \
+ $f:dbget_$fnum:data $rec $d
+ }
+
+ set flist [lreplace $flist $fnum $fnum 1]
+ incr ndx
+ set r [string range $r $ndx end]
+ }
+ }
+ }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ error_check_good $cursors($f) [$cursors($f) close] 0
+ error_check_good db_close:$f [$db($f) close] 0
+ }
+}
diff --git a/db-4.8.30/test/test045.tcl b/db-4.8.30/test/test045.tcl
new file mode 100644
index 0000000..d762498
--- /dev/null
+++ b/db-4.8.30/test/test045.tcl
@@ -0,0 +1,125 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test045
+# TEST Small random tester
+# TEST Runs a number of random add/delete/retrieve operations.
+# TEST Tests both successful conditions and error conditions.
+# TEST
+# TEST Run the random db tester on the specified access method.
+#
+# Options are:
+# -adds <maximum number of keys before you disable adds>
+# -cursors <number of cursors>
+# -dataavg <average data size>
+# -delete <minimum number of keys before you disable deletes>
+# -dups <allow duplicates in file>
+# -errpct <Induce errors errpct of the time>
+# -init <initial number of entries in database>
+# -keyavg <average key size>
+proc test045 { method {nops 10000} args } {
+ source ./include.tcl
+ global encrypt
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test045 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test045 skipping for security"
+ return
+ }
+ set omethod [convert_method $method]
+
+ puts "Test045: Random tester on $method for $nops operations"
+
+ # Set initial parameters
+ set adds [expr $nops * 10]
+ set cursors 5
+ set dataavg 40
+ set delete $nops
+ set dups 0
+ set errpct 0
+ set init 0
+ if { [is_record_based $method] == 1 } {
+ set keyavg 10
+ } else {
+ set keyavg 25
+ }
+
+ # Process arguments
+ set oargs ""
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -adds { incr i; set adds [lindex $args $i] }
+ -cursors { incr i; set cursors [lindex $args $i] }
+ -dataavg { incr i; set dataavg [lindex $args $i] }
+ -delete { incr i; set delete [lindex $args $i] }
+ -dups { incr i; set dups [lindex $args $i] }
+ -errpct { incr i; set errpct [lindex $args $i] }
+ -init { incr i; set init [lindex $args $i] }
+ -keyavg { incr i; set keyavg [lindex $args $i] }
+ -extent { incr i;
+ lappend oargs "-extent" "100" }
+ default { lappend oargs [lindex $args $i] }
+ }
+ }
+
+	# Create the database and initialize it.
+ set root $testdir/test045
+ set f $root.db
+ env_cleanup $testdir
+
+ # Run the script with 3 times the number of initial elements to
+ # set it up.
+ set db [eval {berkdb_open \
+ -create -mode 0644 $omethod} $oargs {$f}]
+ error_check_good dbopen:$f [is_valid_db $db] TRUE
+
+ set r [$db close]
+ error_check_good dbclose:$f $r 0
+
+ # We redirect standard out, but leave standard error here so we
+ # can see errors.
+
+ puts "\tTest045.a: Initializing database"
+ if { $init != 0 } {
+ set n [expr 3 * $init]
+ exec $tclsh_path \
+ $test_path/dbscript.tcl $method $f $n \
+ 1 $init $n $keyavg $dataavg $dups 0 -1 \
+ > $testdir/test045.init
+ }
+ # Check for test failure
+ set initerrs [findfail $testdir/test045.init]
+ foreach str $initerrs {
+ puts "FAIL: error message in .init file: $str"
+ }
+
+ puts "\tTest045.b: Now firing off berkdb rand dbscript, running: "
+ # Now the database is initialized, run a test
+ puts "$tclsh_path\
+ $test_path/dbscript.tcl $method $f $nops $cursors $delete $adds \
+ $keyavg $dataavg $dups $errpct $oargs > $testdir/test045.log"
+
+ exec $tclsh_path \
+ $test_path/dbscript.tcl $method $f \
+ $nops $cursors $delete $adds $keyavg \
+ $dataavg $dups $errpct $oargs\
+ > $testdir/test045.log
+
+ # Check for test failure
+ set logerrs [findfail $testdir/test045.log]
+ foreach str $logerrs {
+ puts "FAIL: error message in log file: $str"
+ }
+}
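+
+# For reference, the dbscript.tcl argument order used above appears to be
+# (inferred from the two invocations; dbscript.tcl itself is authoritative):
+#
+#	dbscript.tcl method file nops ncursors delete-threshold add-limit \
+#	    keyavg dataavg dups errpct extra-args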
diff --git a/db-4.8.30/test/test046.tcl b/db-4.8.30/test/test046.tcl
new file mode 100644
index 0000000..917c5e5
--- /dev/null
+++ b/db-4.8.30/test/test046.tcl
@@ -0,0 +1,820 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test046
+# TEST Overwrite test of small/big key/data with cursor checks.
+proc test046 { method args } {
+ global alphabet
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "\tTest046: Overwrite test with cursor and small/big key/data."
+ puts "\tTest046:\t$method $args"
+
+ if { [is_rrecno $method] == 1} {
+ puts "\tTest046: skipping for method $method."
+ return
+ }
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ if { [is_record_based $method] == 1} {
+ set key ""
+ }
+
+ puts "\tTest046: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test046
+ set env NULL
+ } else {
+ set testfile test046
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile.a.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # keep nkeys even
+ set nkeys 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest046: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_record_based $method] == 1} {
+ set ret [eval {$db put} $txn {$i $data$i}]
+ } elseif { $i < 10 } {
+ set ret [eval {$db put} $txn [set key]00$i \
+ [set data]00$i]
+ } elseif { $i < 100 } {
+ set ret [eval {$db put} $txn [set key]0$i \
+ [set data]0$i]
+ } else {
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ }
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # open curs to db
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ # get db order of keys
+ for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ puts "\tTest046.a: Deletes by key."
+ puts "\t\tTest046.a.1: Get data with SET, then delete before cursor."
+ # get key in middle of page, call this the nth set curr to it
+ set i [expr $nkeys/2]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set curr $ret
+
+ # delete before cursor(n-1), make sure it is gone
+ set i [expr $i - 1]
+ error_check_good db_del [eval {$db del} $txn {$key_set($i)}] 0
+
+ # use set_range to get first key starting at n-1, should
+ # give us nth--but only works for btree
+ if { [is_btree $method] == 1 } {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([incr i])]
+ incr i -1
+ }
+ error_check_bad dbc_get:set(R)(post-delete) [llength $ret] 0
+ error_check_good dbc_get(match):set $ret $curr
+
+ puts "\t\tTest046.a.2: Delete cursor item by key."
+ # nth key, which cursor should be on now
+ set i [incr i]
+ set ret [eval {$db del} $txn {$key_set($i)}]
+ error_check_good db_del $ret 0
+
+ # this should return n+1 key/data, curr has nth key/data
+ if { [string compare $omethod "-btree"] == 0 } {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([expr $i+1])]
+ }
+ error_check_bad dbc_get(post-delete):set_range [llength $ret] 0
+ error_check_bad dbc_get(no-match):set_range $ret $curr
+
+ puts "\t\tTest046.a.3: Delete item after cursor."
+ # we'll delete n+2, since we have deleted n-1 and n
+ # i still equal to nth, cursor on n+1
+ set i [incr i]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set curr [$dbc get -next]
+ error_check_bad dbc_get:next [llength $curr] 0
+ set ret [$dbc get -prev]
+	error_check_bad dbc_get:prev [llength $ret] 0
+ # delete *after* cursor pos.
+ error_check_good db:del [eval {$db del} $txn {$key_set([incr i])}] 0
+
+ # make sure item is gone, try to get it
+ if { [string compare $omethod "-btree"] == 0} {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([expr $i +1])]
+ }
+ error_check_bad dbc_get:set(_range) [llength $ret] 0
+ error_check_bad dbc_get:set(_range) $ret $curr
+ error_check_good dbc_get:set [lindex [lindex $ret 0] 0] \
+ $key_set([expr $i+1])
+
+ puts "\tTest046.b: Deletes by cursor."
+ puts "\t\tTest046.b.1: Delete, do DB_NEXT."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ set i [expr $i+2]
+ # i = n+4
+ error_check_good dbc_get:next(match) \
+ [lindex [lindex $ret 0] 0] $key_set($i)
+
+ puts "\t\tTest046.b.2: Delete, do DB_PREV."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ set i [expr $i-3]
+ # i = n+1 (deleted all in between)
+ error_check_good dbc_get:prev(match) \
+ [lindex [lindex $ret 0] 0] $key_set($i)
+
+ puts "\t\tTest046.b.3: Delete, do DB_CURRENT."
+ error_check_good dbc:del [$dbc del] 0
+ # we just deleted, so current item should be KEYEMPTY, throws err
+ set ret [$dbc get -current]
+ error_check_good dbc_get:curr:deleted [llength [lindex $ret 1]] 0
+ #error_check_good catch:get:current [catch {$dbc get -current} ret] 1
+ #error_check_good dbc_get:curr:deleted [is_substr $ret "DB_KEYEMPTY"] 1
+
+ puts "\tTest046.c: Inserts (before/after), by key then cursor."
+ puts "\t\tTest046.c.1: Insert by key before the cursor."
+ # i is at curs pos, i=n+1, we want to go BEFORE
+ set i [incr i -1]
+ set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
+ error_check_good db_put:before $ret 0
+
+ puts "\t\tTest046.c.2: Insert by key after the cursor."
+ set i [incr i +2]
+ set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
+ error_check_good db_put:after $ret 0
+
+ puts "\t\tTest046.c.3: Insert by curs with deleted curs (should fail)."
+ # cursor is on n+1, we'll change i to match
+ set i [incr i -1]
+
+ error_check_good dbc:close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db:close [$db close] 0
+ if { [is_record_based $method] == 1} {
+ puts "\t\tSkipping the rest of test for method $method."
+ puts "\tTest046 ($method) complete."
+ return
+ } else {
+ # Reopen without printing __db_errs.
+ set db [eval {berkdb_open_noerr} $oflags $testfile.a.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ # should fail with EINVAL (deleted cursor)
+ set errorCode NONE
+ error_check_good catch:put:before 1 \
+ [catch {$dbc put -before $data_set($i)} ret]
+ error_check_good dbc_put:deleted:before \
+ [is_substr $errorCode "EINVAL"] 1
+
+ # should fail with EINVAL
+ set errorCode NONE
+ error_check_good catch:put:after 1 \
+ [catch {$dbc put -after $data_set($i)} ret]
+ error_check_good dbc_put:deleted:after \
+ [is_substr $errorCode "EINVAL"] 1
+
+ puts "\t\tTest046.c.4:\
+ Insert by cursor before/after existent cursor."
+ # can't use before after w/o dup except renumber in recno
+ # first, restore an item so they don't fail
+ #set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
+ #error_check_good db_put $ret 0
+
+ #set ret [$dbc get -set $key_set($i)]
+ #error_check_bad dbc_get:set [llength $ret] 0
+ #set i [incr i -2]
+ # i = n - 1
+ #set ret [$dbc get -prev]
+ #set ret [$dbc put -before $key_set($i) $data_set($i)]
+ #error_check_good dbc_put:before $ret 0
+ # cursor pos is adjusted to match prev, recently inserted
+ #incr i
+ # i = n
+ #set ret [$dbc put -after $key_set($i) $data_set($i)]
+ #error_check_good dbc_put:after $ret 0
+ }
+
+ # For the next part of the test, we need a db with no dups to test
+ # overwrites
+ puts "\tTest046.d.0: Cleanup, close db, open new db with no dups."
+ error_check_good dbc:close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db:close [$db close] 0
+
+ set db [eval {berkdb_open} $oflags $testfile.d.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ # Fill page w/ small key/data pairs
+ puts "\tTest046.d.0: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set nkeys 20
+
+ # Prepare cursor on item
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+
+ # Prepare unique big/small values for an initial
+ # and an overwrite set of key/data
+ foreach ptype {init over} {
+ foreach size {big small} {
+ if { [string compare $size big] == 0 } {
+ set key_$ptype$size \
+ KEY_$size[repeat alphabet 250]
+ set data_$ptype$size \
+ DATA_$size[repeat alphabet 250]
+ } else {
+ set key_$ptype$size \
+ KEY_$size[repeat alphabet 10]
+ set data_$ptype$size \
+ DATA_$size[repeat alphabet 10]
+ }
+ }
+ }
+
+ set i 0
+ # Do all overwrites for key and cursor
+ foreach type {key_over curs_over} {
+ # Overwrite (i=initial) four different kinds of pairs
+ incr i
+ puts "\tTest046.d: Overwrites $type."
+ foreach i_pair {\
+ {small small} {big small} {small big} {big big} } {
+ # Overwrite (w=write) with four different kinds of data
+ foreach w_pair {\
+ {small small} {big small} {small big} {big big} } {
+
+ # we can only overwrite if key size matches
+ if { [string compare [lindex \
+ $i_pair 0] [lindex $w_pair 0]] != 0} {
+ continue
+ }
+
+ # first write the initial key/data
+ set ret [$dbc put -keyfirst \
+ key_init[lindex $i_pair 0] \
+ data_init[lindex $i_pair 1]]
+ error_check_good \
+ dbc_put:curr:init:$i_pair $ret 0
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good dbc_get:curr:data \
+ [lindex [lindex $ret 0] 1] \
+ data_init[lindex $i_pair 1]
+
+ # Now, try to overwrite: dups not supported in
+ # this db
+ if { [string compare $type key_over] == 0 } {
+ puts "\t\tTest046.d.$i: Key\
+ Overwrite:($i_pair) by ($w_pair)."
+ set ret [eval {$db put} $txn \
+ $"key_init[lindex $i_pair 0]" \
+ $"data_over[lindex $w_pair 1]"]
+ error_check_good \
+ dbput:over:i($i_pair):o($w_pair) $ret 0
+ # check value
+ set ret [eval {$db get} $txn \
+ $"key_init[lindex $i_pair 0]"]
+ error_check_bad \
+ db:get:check [llength $ret] 0
+ error_check_good db:get:compare_data \
+ [lindex [lindex $ret 0] 1] \
+ $"data_over[lindex $w_pair 1]"
+ } else {
+ # This is a cursor overwrite
+ puts \
+ "\t\tTest046.d.$i:Curs Overwrite:($i_pair) by ($w_pair)."
+ set ret [$dbc put -current \
+ $"data_over[lindex $w_pair 1]"]
+ error_check_good \
+ dbcput:over:i($i_pair):o($w_pair) $ret 0
+ # check value
+ set ret [$dbc get -current]
+ error_check_bad \
+ dbc_get:curr [llength $ret] 0
+ error_check_good dbc_get:curr:data \
+ [lindex [lindex $ret 0] 1] \
+ $"data_over[lindex $w_pair 1]"
+ }
+ } ;# foreach write pair
+ } ;# foreach initial pair
+ } ;# foreach type big/small
+
+ puts "\tTest046.d.3: Cleanup for next part of test."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { [is_rbtree $method] == 1} {
+ puts "\tSkipping the rest of Test046 for method $method."
+ puts "\tTest046 complete."
+ return
+ }
+
+ puts "\tTest046.e.1: Open db with sorted dups."
+ set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # keep nkeys even
+ set nkeys 20
+ set ndups 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest046.e.2:\
+ Put $nkeys small key/data pairs and $ndups sorted dups."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { $i < 10 } {
+ set ret [eval {$db put} $txn [set key]0$i [set data]0$i]
+ } else {
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ }
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ # get db order of keys
+ for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ # put 20 sorted duplicates on key in middle of page
+ set i [expr $nkeys/2]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+
+ set keym $key_set($i)
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $i < 10 } {
+ set ret [eval {$db put} $txn {$keym DUPLICATE_0$i}]
+ } else {
+ set ret [eval {$db put} $txn {$keym DUPLICATE_$i}]
+ }
+ error_check_good db_put:DUP($i) $ret 0
+ }
+
+ puts "\tTest046.e.3: Check duplicate duplicates"
+ set ret [eval {$db put} $txn {$keym DUPLICATE_00}]
+ error_check_good dbput:dupdup [is_substr $ret "DB_KEYEXIST"] 1
+
+ # get dup ordering
+ for {set i 0; set ret [$dbc get -set $keym]} { [llength $ret] != 0} {\
+ set ret [$dbc get -nextdup] } {
+ set dup_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ # put cursor on item in middle of dups
+ set i [expr $ndups/2]
+ set ret [$dbc get -get_both $keym $dup_set($i)]
+ error_check_bad dbc_get:get_both [llength $ret] 0
+
+ puts "\tTest046.f: Deletes by cursor."
+ puts "\t\tTest046.f.1: Delete by cursor, do a DB_NEXT, check cursor."
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret] 0
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good \
+ dbc_get:nextdup [lindex [lindex $ret 0] 1] $dup_set([incr i])
+
+ puts "\t\tTest046.f.2: Delete by cursor, do DB_PREV, check cursor."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ set i [incr i -2]
+ error_check_good dbc_get:prev [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ puts "\t\tTest046.f.3: Delete by cursor, do DB_CURRENT, check cursor."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current:deleted [llength [lindex $ret 1]] 0
+ #error_check_good catch:dbc_get:curr [catch {$dbc get -current} ret] 1
+ #error_check_good \
+ # dbc_get:current:deleted [is_substr $ret "DB_KEYEMPTY"] 1
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # restore deleted keys
+ error_check_good db_put:1 [eval {$db put} $txn {$keym $dup_set($i)}] 0
+ error_check_good db_put:2 [eval {$db put} $txn \
+ {$keym $dup_set([incr i])}] 0
+ error_check_good db_put:3 [eval {$db put} $txn \
+ {$keym $dup_set([incr i])}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # tested above
+
+ # Reopen database without __db_err, reset cursor
+ error_check_good dbclose [$db close] 0
+ set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set ret2 [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret2] 0
+ # match
+ error_check_good dbc_get:current/set(match) $ret $ret2
+ # right one?
+ error_check_good \
+ dbc_get:curr/set(matchdup) [lindex [lindex $ret 0] 1] $dup_set(0)
+
+ # cursor is on first dup
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ # now on second dup
+ error_check_good dbc_get:next [lindex [lindex $ret 0] 1] $dup_set(1)
+ # check cursor
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbcget:curr(compare) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\tTest046.g: Inserts."
+ puts "\t\tTest046.g.1: Insert by key before cursor."
+ set i 0
+
+ # use "spam" to prevent a duplicate duplicate.
+ set ret [eval {$db put} $txn {$keym $dup_set($i)spam}]
+ error_check_good db_put:before $ret 0
+ # make sure cursor was maintained
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:current(post-put) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\t\tTest046.g.2: Insert by key after cursor."
+ set i [expr $i + 2]
+ # use "eggs" to prevent a duplicate duplicate
+ set ret [eval {$db put} $txn {$keym $dup_set($i)eggs}]
+ error_check_good db_put:after $ret 0
+ # make sure cursor was maintained
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:curr(post-put,after) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\t\tTest046.g.3: Insert by curs before/after curs (should fail)."
+ # should return EINVAL (dupsort specified)
+ error_check_good dbc_put:before:catch \
+ [catch {$dbc put -before $dup_set([expr $i -1])} ret] 1
+ error_check_good \
+ dbc_put:before:deleted [is_substr $errorCode "EINVAL"] 1
+ error_check_good dbc_put:after:catch \
+ [catch {$dbc put -after $dup_set([expr $i +2])} ret] 1
+ error_check_good \
+ dbc_put:after:deleted [is_substr $errorCode "EINVAL"] 1
+
+ puts "\tTest046.h: Cursor overwrites."
+ puts "\t\tTest046.h.1: Test that dupsort disallows current overwrite."
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ error_check_good \
+ catch:dbc_put:curr [catch {$dbc put -current DATA_OVERWRITE} ret] 1
+ error_check_good dbc_put:curr:dupsort [is_substr $errorCode EINVAL] 1
+
+ puts "\t\tTest046.h.2: New db (no dupsort)."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Skip the rest of the test for compressed btree, now that
+ # we're no longer running with -dupsort.
+ if { [is_compressed $args] == 1 } {
+ puts "Skipping remainder of test046\
+ for btree with compression."
+ return
+ }
+
+ set db [eval {berkdb_open} \
+ $oflags -dup $testfile.h.db]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ for {set i 0} {$i < $nkeys} {incr i} {
+ if { $i < 10 } {
+ set ret [eval {$db put} $txn {key0$i datum0$i}]
+ error_check_good db_put $ret 0
+ } else {
+ set ret [eval {$db put} $txn {key$i datum$i}]
+ error_check_good db_put $ret 0
+ }
+ if { $i == 0 } {
+ for {set j 0} {$j < $ndups} {incr j} {
+ if { $i < 10 } {
+ set keyput key0$i
+ } else {
+ set keyput key$i
+ }
+ if { $j < 10 } {
+ set ret [eval {$db put} $txn \
+ {$keyput DUP_datum0$j}]
+ } else {
+ set ret [eval {$db put} $txn \
+ {$keyput DUP_datum$j}]
+ }
+ error_check_good dbput:dup $ret 0
+ }
+ }
+ }
+
+ for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ for {set i 0; set ret [$dbc get -set key00]} {\
+ [llength $ret] != 0} {set ret [$dbc get -nextdup]} {
+ set dup_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+ set i 0
+ set keym key0$i
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ error_check_good \
+ dbc_get:set(match) [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ set ret [$dbc get -nextdup]
+ error_check_bad dbc_get:nextdup [llength $ret] 0
+ error_check_good dbc_get:nextdup(match) \
+ [lindex [lindex $ret 0] 1] $dup_set([expr $i + 1])
+
+ puts "\t\tTest046.h.3: Insert by cursor before cursor (DB_BEFORE)."
+ set ret [$dbc put -before BEFOREPUT]
+ error_check_good dbc_put:before $ret 0
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:curr:match [lindex [lindex $ret 0] 1] BEFOREPUT
+ # make sure that this is actually a dup w/ dup before
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ error_check_good dbc_get:prev:match \
+ [lindex [lindex $ret 0] 1] $dup_set($i)
+ set ret [$dbc get -prev]
+ # should not be a dup
+ error_check_bad dbc_get:prev(no_dup) \
+ [lindex [lindex $ret 0] 0] $keym
+
+ puts "\t\tTest046.h.4: Insert by cursor after cursor (DB_AFTER)."
+ set ret [$dbc get -set $keym]
+
+ # delete next 3 when fix
+ #puts "[$dbc get -current]\
+ # [$dbc get -next] [$dbc get -next] [$dbc get -next] [$dbc get -next]"
+ #set ret [$dbc get -set $keym]
+
+ error_check_bad dbc_get:set [llength $ret] 0
+ set ret [$dbc put -after AFTERPUT]
+ error_check_good dbc_put:after $ret 0
+ #puts [$dbc get -current]
+
+ # delete next 3 when fix
+ #set ret [$dbc get -set $keym]
+ #puts "[$dbc get -current] next: [$dbc get -next] [$dbc get -next]"
+ #set ret [$dbc get -set AFTERPUT]
+ #set ret [$dbc get -set $keym]
+ #set ret [$dbc get -next]
+ #puts $ret
+
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good dbc_get:curr:match [lindex [lindex $ret 0] 1] AFTERPUT
+ set ret [$dbc get -prev]
+ # now should be on first item (non-dup) of keym
+ error_check_bad dbc_get:prev1 [llength $ret] 0
+ error_check_good \
+ dbc_get:match [lindex [lindex $ret 0] 1] $dup_set($i)
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good \
+ dbc_get:match2 [lindex [lindex $ret 0] 1] AFTERPUT
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ # this is the dup we added previously
+ error_check_good \
+ dbc_get:match3 [lindex [lindex $ret 0] 1] BEFOREPUT
+
+ # now get rid of the dups we added
+ error_check_good dbc_del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev2 [llength $ret] 0
+ error_check_good dbc_del2 [$dbc del] 0
+ # put cursor on first dup item for the rest of test
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good \
+ dbc_get:first:check [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ puts "\t\tTest046.h.5: Overwrite small by small."
+ set ret [$dbc put -current DATA_OVERWRITE]
+ error_check_good dbc_put:current:overwrite $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,small/small) \
+ [lindex [lindex $ret 0] 1] DATA_OVERWRITE
+
+ puts "\t\tTest046.h.6: Overwrite small with big."
+ set ret [$dbc put -current DATA_BIG_OVERWRITE[repeat $alphabet 200]]
+ error_check_good dbc_put:current:overwrite:big $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,small/big) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_BIG_OVERWRITE] 1
+
+ puts "\t\tTest046.h.7: Overwrite big with big."
+ set ret [$dbc put -current DATA_BIG_OVERWRITE2[repeat $alphabet 200]]
+ error_check_good dbc_put:current:overwrite(2):big $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,big/big) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_BIG_OVERWRITE2] 1
+
+ puts "\t\tTest046.h.8: Overwrite big with small."
+ set ret [$dbc put -current DATA_OVERWRITE2]
+ error_check_good dbc_put:current:overwrite:small $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,big/small) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_OVERWRITE2] 1
+
+ puts "\tTest046.i: Cleaning up from test."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest046 complete."
+}
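+
+# A minimal sketch (not called by the test) of the cursor put modes exercised
+# above; the proc name and data strings are illustrative only.  -current
+# overwrites the data of the item the cursor is positioned on, while -before
+# and -after insert a new duplicate relative to the cursor and are rejected
+# with EINVAL when the database was opened with -dupsort.
+proc cursor_put_sketch { db key } {
+	set dbc [$db cursor]
+	error_check_good sketch_cursor [is_valid_cursor $dbc $db] TRUE
+	set ret [$dbc get -set $key]
+	if { [llength $ret] != 0 } {
+		# Overwrite the current item's data in place.
+		error_check_good sketch_current [$dbc put -current NEWDATA] 0
+		# Duplicate inserts around the cursor position; these are
+		# only legal for unsorted-duplicate databases, so tolerate
+		# failure here.
+		catch {$dbc put -before BEFOREDATA}
+		catch {$dbc put -after AFTERDATA}
+	}
+	error_check_good sketch_close [$dbc close] 0
+}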
diff --git a/db-4.8.30/test/test047.tcl b/db-4.8.30/test/test047.tcl
new file mode 100644
index 0000000..24fe8c3
--- /dev/null
+++ b/db-4.8.30/test/test047.tcl
@@ -0,0 +1,261 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test047
+# TEST DBcursor->c_get get test with SET_RANGE option.
+proc test047 { method args } {
+ source ./include.tcl
+
+ set tnum 047
+ set args [convert_args $method $args]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tnum skipping for method $method"
+ return
+ }
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test$tnum skipping for btree with compression."
+ return
+ }
+ set method "-btree"
+
+ puts "\tTest$tnum: Test of SET_RANGE interface to DB->c_get ($method)."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tnum.a: Create $method database."
+ set eindex [lsearch -exact $args "-env"]
+ set txnenv 0
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set testfile1 $testdir/test$tnum.a.db
+ set testfile2 $testdir/test$tnum.b.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ set testfile1 test$tnum.a.db
+ set testfile2 test$tnum.b.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 -dup $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 20
+ # Fill page w/ small key/data pairs
+ #
+ puts "\tTest$tnum.b: Fill page with $nkeys small key/data pairs."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest$tnum.c: Get data with SET_RANGE, then delete by cursor."
+ set i 0
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+ set curr $ret
+
+ # delete by cursor, make sure it is gone
+ error_check_good dbc_del [$dbc del] 0
+
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get(post-delete):set_range [llength $ret] 0
+ error_check_bad dbc_get(no-match):set_range $ret $curr
+
+ puts "\tTest$tnum.d: \
+ Use another cursor to fix item on page, delete by db."
+ set dbcurs2 [eval {$db cursor} $txn]
+ error_check_good db:cursor2 [is_valid_cursor $dbcurs2 $db] TRUE
+
+ set ret [$dbcurs2 get -set [lindex [lindex $ret 0] 0]]
+ error_check_bad dbc_get(2):set [llength $ret] 0
+ set curr $ret
+ error_check_good db:del [eval {$db del} $txn \
+ {[lindex [lindex $ret 0] 0]}] 0
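+	# dbcurs2 is still positioned on the item, pinning it on the page
+	# while the db-level delete removes it; the set_range calls below
+	# must therefore come back with a different item than $curr.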
+
+ # make sure item is gone
+ set ret [$dbcurs2 get -set_range [lindex [lindex $curr 0] 0]]
+ error_check_bad dbc2_get:set_range [llength $ret] 0
+ error_check_bad dbc2_get:set_range $ret $curr
+
+ puts "\tTest$tnum.e: Close for second part of test, close db/cursors."
+ error_check_good dbc:close [$dbc close] 0
+ error_check_good dbc2:close [$dbcurs2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbclose [$db close] 0
+
+ # open db
+ set db [eval {berkdb_open} $oflags $testfile1]
+ error_check_good dbopen2 [is_valid_db $db] TRUE
+
+ set nkeys 10
+ puts "\tTest$tnum.f: Fill page with $nkeys pairs, one set of dups."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ # a pair
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set j 0
+ for {set i 0} { $i < $nkeys } {incr i} {
+ # a dup set for same 1 key
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i DUP_$data$i}]
+ error_check_good dbput($i):dup $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest$tnum.g: \
+ Get dups key w/ SET_RANGE, pin onpage with another cursor."
+ set i 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
+ set ret2 [$dbc2 get -set_range $key$i]
+	error_check_bad dbc2_get:set_range [llength $ret2] 0
+
+ error_check_good dbc_compare $ret $ret2
+ puts "\tTest$tnum.h: \
+ Delete duplicates' key, use SET_RANGE to get next dup."
+ set ret [$dbc2 del]
+ error_check_good dbc2_del $ret 0
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+ error_check_bad dbc_get:set_range $ret $ret2
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good dbc2_close [$dbc2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $oflags $testfile2]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 10
+ set ndups 1000
+
+ puts "\tTest$tnum.i: Fill page with $nkeys pairs and $ndups dups."
+ for {set i 0} { $i < $nkeys } { incr i} {
+ # a pair
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput $ret 0
+
+ # dups for single pair
+ if { $i == 0} {
+ for {set j 0} { $j < $ndups } { incr j } {
+ set ret [eval {$db put} $txn \
+ {$key$i DUP_$data$i:$j}]
+ error_check_good dbput:dup $ret 0
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ set i 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
+ puts "\tTest$tnum.j: \
+ Get key of first dup with SET_RANGE, fix with 2 curs."
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+
+ set ret2 [$dbc2 get -set_range $key$i]
+	error_check_bad dbc2_get:set_range [llength $ret2] 0
+ set curr $ret2
+
+ error_check_good dbc_compare $ret $ret2
+
+ puts "\tTest$tnum.k: Delete item by cursor, use SET_RANGE to verify."
+ set ret [$dbc2 del]
+ error_check_good dbc2_del $ret 0
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+ error_check_bad dbc_get:set_range $ret $curr
+
+ puts "\tTest$tnum.l: Cleanup."
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good dbc2_close [$dbc2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tnum complete."
+}
diff --git a/db-4.8.30/test/test048.tcl b/db-4.8.30/test/test048.tcl
new file mode 100644
index 0000000..4ac9681
--- /dev/null
+++ b/db-4.8.30/test/test048.tcl
@@ -0,0 +1,178 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test048
+# TEST Cursor stability across Btree splits.
+proc test048 { method args } {
+ global errorCode
+ global is_je_test
+ source ./include.tcl
+
+ set tnum 048
+ set args [convert_args $method $args]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tnum skipping for method $method."
+ return
+ }
+
+ # Compression will change the behavior of page splits.
+ # Skip test for compression.
+ if { [is_compressed $args] } {
+ puts "Test$tnum skipping for compression"
+ return
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ incr pgindex
+ if { [lindex $args $pgindex] > 8192 } {
+ puts "Test048: Skipping for large pagesizes"
+ return
+ }
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tnum: Test of cursor stability across btree splits."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tnum.a: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 5
+ # Fill page w/ small key/data pairs, keep at leaf
+ #
+ puts "\tTest$tnum.b: Fill page with $nkeys small key/data pairs."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {key000$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # get db ordering, set cursors
+ puts "\tTest$tnum.c: Set cursors on each of $nkeys pairs."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for {set i 0; set ret [$db get key000$i]} {\
+ $i < $nkeys && [llength $ret] != 0} {\
+ incr i; set ret [$db get key000$i]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ set dbc [eval {$db cursor} $txn]
+ set dbc_set($i) $dbc
+ error_check_good db_cursor:$i \
+ [is_valid_cursor $dbc_set($i) $db] TRUE
+ set ret [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc_set($i)_get:set [llength $ret] 0
+ }
+
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 1000
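+	# Keys are zero-padded to four digits (key0000 ... key0999) so their
+	# lexical order matches numeric insertion order; the loop below picks
+	# the padding width based on the size of $i.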
+ puts "\tTest$tnum.d: Add $mkeys pairs to force split."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ set ret [eval {$db put} $txn {key0$i $data$i}]
+ } elseif { $i >= 10 } {
+ set ret [eval {$db put} $txn {key00$i $data$i}]
+ } else {
+ set ret [eval {$db put} $txn {key000$i $data$i}]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tnum.e: Make sure split happened."
+ # XXX We cannot call stat with active txns or we deadlock.
+ if { $txnenv != 1 && !$is_je_test } {
+ error_check_bad stat:check-split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+ }
+
+ puts "\tTest$tnum.f: Check to see that cursors maintained reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tnum.g: Delete added keys to force reverse split."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key0$i}] 0
+ } elseif { $i >= 10 } {
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key00$i}] 0
+ } else {
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key000$i}] 0
+ }
+ }
+
+ puts "\tTest$tnum.h: Verify cursor reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tnum.i: Cleanup."
+ # close cursors
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good dbc_close:$i [$dbc_set($i) close] 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ puts "\tTest$tnum.j: Verify reverse split."
+ error_check_good stat:check-reverse_split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest$tnum complete."
+}
diff --git a/db-4.8.30/test/test049.tcl b/db-4.8.30/test/test049.tcl
new file mode 100644
index 0000000..32fdbb0
--- /dev/null
+++ b/db-4.8.30/test/test049.tcl
@@ -0,0 +1,186 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test049
+# TEST Cursor operations on uninitialized cursors.
+proc test049 { method args } {
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set tnum 049
+ set renum [is_rrecno $method]
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "\tTest$tnum: Test of cursor routines with uninitialized cursors."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+ set rflags ""
+
+ if { [is_record_based $method] == 1 } {
+ set key ""
+ }
+
+ puts "\tTest$tnum.a: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $rflags $omethod $args"
+ if { [is_record_based $method] == 0 &&\
+ [is_rbtree $method] != 1 && [is_compressed $args] == 0 } {
+ append oflags " -dup"
+ }
+ set db [eval {berkdb_open_noerr} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 10
+ puts "\tTest$tnum.b: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput:$i $ret 0
+ if { $i == 1 } {
+ for {set j 0} { $j < [expr $nkeys / 2]} {incr j} {
+ set ret [eval {$db put} $txn \
+ {$key$i DUPLICATE$j}]
+ error_check_good dbput:dup:$j $ret 0
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # DBC GET
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc_u [eval {$db cursor} $txn]
+ error_check_good db:cursor [is_valid_cursor $dbc_u $db] TRUE
+
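+	# The cursor above is open but has never been positioned; each
+	# dbc->get flag below is tried on this unpositioned cursor and
+	# errorCode is checked for EINVAL.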
+ puts "\tTest$tnum.c: Test dbc->get interfaces..."
+ set i 0
+ foreach flag { current first last next prev nextdup} {
+ puts "\t\t...dbc->get($flag)"
+ catch {$dbc_u get -$flag} ret
+ error_check_good dbc:get:$flag [is_substr $errorCode EINVAL] 1
+ }
+
+ foreach flag { set set_range get_both} {
+ puts "\t\t...dbc->get($flag)"
+ if { [string compare $flag get_both] == 0} {
+ catch {$dbc_u get -$flag $key$i data0} ret
+ } else {
+ catch {$dbc_u get -$flag $key$i} ret
+ }
+ error_check_good dbc:get:$flag [is_substr $errorCode EINVAL] 1
+ }
+
+ puts "\t\t...dbc->get(current, partial)"
+ catch {$dbc_u get -current -partial {0 0}} ret
+ error_check_good dbc:get:partial [is_substr $errorCode EINVAL] 1
+
+ puts "\t\t...dbc->get(current, rmw)"
+ catch {$dbc_u get -rmw -current } ret
+ error_check_good dbc_get:rmw [is_substr $errorCode EINVAL] 1
+
+ puts "\tTest$tnum.d: Test dbc->put interface..."
+ # partial...depends on another
+ foreach flag { after before current keyfirst keylast } {
+ puts "\t\t...dbc->put($flag)"
+ if { [string match key* $flag] == 1 } {
+ if { [is_record_based $method] == 1 ||\
+ [is_compressed $args] == 1 } {
+ # Keyfirst/keylast not allowed.
+ puts "\t\t...Skipping dbc->put($flag)."
+ continue
+ } else {
+ # keyfirst/last should succeed
+ puts "\t\t...dbc->put($flag)...should succeed."
+ error_check_good dbcput:$flag \
+ [$dbc_u put -$flag $key$i data0] 0
+
+ # now uninitialize cursor
+ error_check_good dbc_close [$dbc_u close] 0
+ set dbc_u [eval {$db cursor} $txn]
+ error_check_good \
+ db_cursor [is_substr $dbc_u $db] 1
+ }
+ } elseif { [string compare $flag before ] == 0 ||
+ [string compare $flag after ] == 0 } {
+ if { [is_record_based $method] == 0 &&\
+ [is_rbtree $method] == 0 &&\
+ [is_compressed $args] == 0} {
+ set ret [$dbc_u put -$flag data0]
+ error_check_good "$dbc_u:put:-$flag" $ret 0
+ } elseif { $renum == 1 } {
+ # Renumbering recno will return a record number
+ set currecno \
+ [lindex [lindex [$dbc_u get -current] 0] 0]
+ set ret [$dbc_u put -$flag data0]
+ if { [string compare $flag after] == 0 } {
+ error_check_good "$dbc_u put $flag" \
+ $ret [expr $currecno + 1]
+ } else {
+ error_check_good "$dbc_u put $flag" \
+ $ret $currecno
+ }
+ } else {
+ puts "\t\tSkipping $flag for $method"
+ }
+ } else {
+ set ret [$dbc_u put -$flag data0]
+ error_check_good "$dbc_u:put:-$flag" $ret 0
+ }
+ }
+ # and partial
+ puts "\t\t...dbc->put(partial)"
+ catch {$dbc_u put -partial {0 0} $key$i $data$i} ret
+ error_check_good dbc_put:partial [is_substr $errorCode EINVAL] 1
+
+ # XXX dbc->dup, db->join (dbc->get join_item)
+ # dbc del
+ puts "\tTest$tnum.e: Test dbc->del interface."
+ catch {$dbc_u del} ret
+ error_check_good dbc_del [is_substr $errorCode EINVAL] 1
+
+ error_check_good dbc_close [$dbc_u close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tnum complete."
+}
diff --git a/db-4.8.30/test/test050.tcl b/db-4.8.30/test/test050.tcl
new file mode 100644
index 0000000..74b509a
--- /dev/null
+++ b/db-4.8.30/test/test050.tcl
@@ -0,0 +1,220 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test050
+# TEST Overwrite test of small/big key/data with cursor checks for Recno.
+proc test050 { method args } {
+ global alphabet
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set tstn 050
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_rrecno $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+
+ puts "\tTest$tstn:\
+ Overwrite test with cursor and small/big key/data ($method)."
+
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tstn.db
+ set env NULL
+ } else {
+ set testfile test0$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open_noerr} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # keep nkeys even
+ set nkeys 20
+
+ # Fill page w/ small key/data pairs
+ #
+ puts "\tTest$tstn: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i [chop_data $method $data$i]}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # get db order of keys
+ for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ # verify ordering: should be unnecessary, but hey, why take chances?
+ # key_set is zero indexed but keys start at 1
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good \
+ verify_order:$i $key_set($i) [pad_data $method [expr $i+1]]
+ }
+
+ puts "\tTest$tstn.a: Inserts before/after by cursor."
+ puts "\t\tTest$tstn.a.1:\
+ Insert with uninitialized cursor (should fail)."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ catch {$dbc put -before DATA1} ret
+ error_check_good dbc_put:before:uninit [is_substr $errorCode EINVAL] 1
+
+ catch {$dbc put -after DATA2} ret
+ error_check_good dbc_put:after:uninit [is_substr $errorCode EINVAL] 1
+
+ puts "\t\tTest$tstn.a.2: Insert with deleted cursor (should succeed)."
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good dbc_del [$dbc del] 0
+ set ret [$dbc put -current DATAOVER1]
+ error_check_good dbc_put:current:deleted $ret 0
+
+ puts "\t\tTest$tstn.a.3: Insert by cursor before cursor (DB_BEFORE)."
+ set currecno [lindex [lindex [$dbc get -current] 0] 0]
+ set ret [$dbc put -before DATAPUTBEFORE]
+ error_check_good dbc_put:before $ret $currecno
+ set old1 [$dbc get -next]
+ error_check_bad dbc_get:next [llength $old1] 0
+ error_check_good \
+ dbc_get:next(compare) [lindex [lindex $old1 0] 1] DATAOVER1
+
+ puts "\t\tTest$tstn.a.4: Insert by cursor after cursor (DB_AFTER)."
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good dbc_get:first [lindex [lindex $ret 0] 1] DATAPUTBEFORE
+ set currecno [lindex [lindex [$dbc get -current] 0] 0]
+ set ret [$dbc put -after DATAPUTAFTER]
+ error_check_good dbc_put:after $ret [expr $currecno + 1]
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ error_check_good \
+ dbc_get:prev [lindex [lindex $ret 0] 1] DATAPUTBEFORE
+
+ puts "\t\tTest$tstn.a.5: Verify that all keys have been renumbered."
+ # should be $nkeys + 2 keys, starting at 1
+ for {set i 1; set ret [$dbc get -first]} { \
+ $i <= $nkeys && [llength $ret] != 0 } {\
+ incr i; set ret [$dbc get -next]} {
+ error_check_good check_renumber $i [lindex [lindex $ret 0] 0]
+ }
+
+ # tested above
+
+ puts "\tTest$tstn.b: Overwrite tests (cursor and key)."
+ # For the next part of the test, we need a db with no dups to test
+ # overwrites
+ #
+ # we should have ($nkeys + 2) keys, ordered:
+ # DATAPUTBEFORE, DATAPUTAFTER, DATAOVER1, data1, ..., data$nkeys
+ #
+ # Prepare cursor on item
+ #
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+
+ # Prepare unique big/small values for an initial
+ # and an overwrite set of data
+	set databig DATA_BIG_[repeat $alphabet 250]
+ set datasmall DATA_SMALL
+
+ # Now, we want to overwrite data:
+ # by key and by cursor
+ # 1. small by small
+ # 2. small by big
+ # 3. big by small
+ # 4. big by big
+ #
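+	# Both overwrite paths target record 1: the cursor was positioned
+	# on the first record above, and the by-key put writes key 1
+	# directly.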
+ set i 0
+ # Do all overwrites for key and cursor
+ foreach type { by_key by_cursor } {
+ incr i
+ puts "\tTest$tstn.b.$i: Overwrites $type."
+ foreach pair { {small small} \
+ {small big} {big small} {big big} } {
+ # put in initial type
+ set data $data[lindex $pair 0]
+ set ret [$dbc put -current $data]
+ error_check_good dbc_put:curr:init:($pair) $ret 0
+
+ # Now, try to overwrite: dups not supported in this db
+ if { [string compare $type by_key] == 0 } {
+ puts "\t\tTest$tstn.b.$i:\
+ Overwrite:($pair):$type"
+ set ret [eval {$db put} $txn \
+ 1 {OVER$pair$data[lindex $pair 1]}]
+ error_check_good dbput:over:($pair) $ret 0
+ } else {
+ # This is a cursor overwrite
+ puts "\t\tTest$tstn.b.$i:\
+ Overwrite:($pair) by cursor."
+ set ret [$dbc put \
+ -current OVER$pair$data[lindex $pair 1]]
+ error_check_good dbcput:over:($pair) $ret 0
+ }
+ } ;# foreach pair
+ } ;# foreach type key/cursor
+
+ puts "\tTest$tstn.c: Cleanup and close cursor."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+}
diff --git a/db-4.8.30/test/test051.tcl b/db-4.8.30/test/test051.tcl
new file mode 100644
index 0000000..e596473
--- /dev/null
+++ b/db-4.8.30/test/test051.tcl
@@ -0,0 +1,225 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test051
+# TEST Fixed-length record Recno test.
+# TEST 0. Test various flags (legal and illegal) to open
+# TEST 1. Test partial puts where dlen != size (should fail)
+# TEST 2. Partial puts for existent record -- replaces at beg, mid, and
+# TEST end of record, as well as full replace
+proc test051 { method { args "" } } {
+ global fixed_len
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test051 ($method): Test of the fixed length records."
+ if { [is_fixed_length $method] != 1 } {
+ puts "Test051: skipping for method $method"
+ return
+ }
+ if { [is_partitioned $args] } {
+ puts "Test051 skipping for partitioned $omethod"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test051.db
+ set testfile1 $testdir/test051a.db
+ set env NULL
+ } else {
+ set testfile test051.db
+ set testfile1 test051a.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set oflags "-create -mode 0644 $args"
+
+ # Test various flags (legal and illegal) to open
+ puts "\tTest051.a: Test correct flag behavior on open."
+ set errorCode NONE
+ foreach f { "-dup" "-dup -dupsort" "-recnum" } {
+ puts "\t\tTest051.a: Test flag $f"
+ set stat [catch {eval {berkdb_open_noerr} $oflags $f $omethod \
+ $testfile} ret]
+ error_check_good dbopen:flagtest:catch $stat 1
+ error_check_good \
+ dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
+ set errorCode NONE
+ }
+ set f "-renumber"
+ puts "\t\tTest051.a: Test $f"
+ if { [is_frecno $method] == 1 } {
+ set db [eval {berkdb_open} $oflags $f $omethod $testfile]
+ error_check_good dbopen:flagtest:$f [is_valid_db $db] TRUE
+ $db close
+ } else {
+ error_check_good \
+ dbopen:flagtest:catch [catch {eval {berkdb_open_noerr}\
+ $oflags $f $omethod $testfile} ret] 1
+ error_check_good \
+ dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
+ }
+
+ # Test partial puts where dlen != size (should fail)
+ # it is an error to specify a partial put w/ different
+ # dlen and size in fixed length recno/queue
+ set key 1
+ set data ""
+ set txn ""
+ set test_char "a"
+
+ set db [eval {berkdb_open_noerr} $oflags $omethod $testfile1]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ puts "\tTest051.b: Partial puts with dlen != size."
+ foreach dlen { 1 16 20 32 } {
+ foreach doff { 0 10 20 32 } {
+ # dlen < size
+			puts "\t\tTest051.b: dlen: $dlen, doff: $doff, \
+ size: [expr $dlen+1]"
+ set data [repeat $test_char [expr $dlen + 1]]
+ error_check_good \
+ catch:put 1 [catch {eval {$db put -partial \
+ [list $doff $dlen]} $txn {$key $data}} ret]
+
+ # We don't get back the server error string just
+ # the result.
+ if { $eindex == -1 } {
+ error_check_good "dbput:partial: dlen < size" \
+ [is_substr \
+ $errorInfo "ecord length"] 1
+ } else {
+ error_check_good "dbput:partial: dlen < size" \
+ [is_substr $errorCode "EINVAL"] 1
+ }
+
+ # dlen > size
+			puts "\t\tTest051.b: dlen: $dlen, doff: $doff, \
+ size: [expr $dlen-1]"
+ set data [repeat $test_char [expr $dlen - 1]]
+ error_check_good \
+ catch:put 1 [catch {eval {$db put -partial \
+ [list $doff $dlen]} $txn {$key $data}} ret]
+ if { $eindex == -1 } {
+ error_check_good "dbput:partial: dlen > size" \
+ [is_substr \
+ $errorInfo "ecord length"] 1
+ } else {
+				error_check_good "dbput:partial: dlen > size" \
+ [is_substr $errorCode "EINVAL"] 1
+ }
+ }
+ }
+
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ $db close
+
+ # Partial puts for existent record -- replaces at beg, mid, and
+ # end of record, as well as full replace
+ puts "\tTest051.f: Partial puts within existent record."
+ set db [eval {berkdb_open} $oflags $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\t\tTest051.f: First try a put and then a full replace."
+ set data [repeat "a" $fixed_len]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {1 $data}]
+ error_check_good dbput $ret 0
+ set ret [eval {$db get} $txn {-recno 1}]
+ error_check_good dbget $data [lindex [lindex $ret 0] 1]
+
+ set data [repeat "b" $fixed_len]
+ set ret [eval {$db put -partial [list 0 $fixed_len]} $txn {1 $data}]
+ error_check_good dbput $ret 0
+ set ret [eval {$db get} $txn {-recno 1}]
+ error_check_good dbget $data [lindex [lindex $ret 0] 1]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set data "InitialData"
+ set pdata "PUT"
+ set dlen [string length $pdata]
+ set ilen [string length $data]
+ set mid [expr $ilen/2]
+
+ # put initial data
+ set key 0
+
+ set offlist [list 0 $mid [expr $ilen -1] [expr $fixed_len - $dlen]]
+ puts "\t\tTest051.g: Now replace at different offsets ($offlist)."
+ foreach doff $offlist {
+ incr key
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good dbput:init $ret 0
+
+ puts "\t\tTest051.g: Replace at offset $doff."
+ set ret [eval {$db put -partial [list $doff $dlen]} $txn \
+ {$key $pdata}]
+ error_check_good dbput:partial $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $doff == 0} {
+ set beg ""
+ set end [string range $data $dlen $ilen]
+ } else {
+ set beg [string range $data 0 [expr $doff - 1]]
+ set end [string range $data [expr $doff + $dlen] $ilen]
+ }
+ if { $doff > $ilen } {
+ # have to put padding between record and inserted
+ # string
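+			# The binary format spec below works as: a$nlen
+			# copies $newdata verbatim, x$diff emits $diff NUL
+			# padding bytes, and a$dlen appends $pdata.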
+ set newdata [format %s%s $beg $end]
+ set diff [expr $doff - $ilen]
+ set nlen [string length $newdata]
+ set newdata [binary \
+ format a[set nlen]x[set diff]a$dlen $newdata $pdata]
+ } else {
+ set newdata [make_fixed_length \
+ frecno [format %s%s%s $beg $pdata $end]]
+ }
+ set ret [$db get -recno $key]
+ error_check_good compare($newdata,$ret) \
+ [binary_compare [lindex [lindex $ret 0] 1] $newdata] 0
+ }
+
+ $db close
+}
diff --git a/db-4.8.30/test/test052.tcl b/db-4.8.30/test/test052.tcl
new file mode 100644
index 0000000..e401912
--- /dev/null
+++ b/db-4.8.30/test/test052.tcl
@@ -0,0 +1,268 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test052
+# TEST Renumbering record Recno test.
+proc test052 { method args } {
+ global alphabet
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test052: Test of renumbering recno."
+ if { [is_rrecno $method] != 1} {
+ puts "Test052: skipping for method $method."
+ return
+ }
+
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest052: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test052.db
+ set env NULL
+ } else {
+ set testfile test052.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # keep nkeys even
+ set nkeys 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest052: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # open curs to db
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # get db order of keys
+ for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set keys($i) [lindex [lindex $ret 0] 0]
+ set darray($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ puts "\tTest052: Deletes by key."
+ puts "\tTest052.a: Get data with SET, then delete before cursor."
+ # get key in middle of page, call this the nth set curr to it
+ set i [expr $nkeys/2]
+ set k $keys($i)
+ set ret [$dbc get -set $k]
+ error_check_bad dbc_get:set [llength $ret] 0
+ error_check_good dbc_get:set [lindex [lindex $ret 0] 1] $darray($i)
+
+ # delete by key before current
+ set i [incr i -1]
+ error_check_good db_del:before [eval {$db del} $txn {$keys($i)}] 0
+	# with renumber, current's data should be constant, but the
+	# current key is decremented by one
+ set i [incr i +1]
+ error_check_good dbc:data \
+ [lindex [lindex [$dbc get -current] 0] 1] $darray($i)
+ error_check_good dbc:keys \
+ [lindex [lindex [$dbc get -current] 0] 0] $keys([expr $nkeys/2 - 1])
+
+ puts "\tTest052.b: Delete cursor item by key."
+ set i [expr $nkeys/2 ]
+
+ set ret [$dbc get -set $keys($i)]
+ error_check_bad dbc:get [llength $ret] 0
+ error_check_good dbc:get:curs [lindex [lindex $ret 0] 1] \
+ $darray([expr $i + 1])
+ error_check_good db_del:curr [eval {$db del} $txn {$keys($i)}] 0
+ set ret [$dbc get -current]
+
+ # After a delete, cursor should return DB_NOTFOUND.
+ error_check_good dbc:get:key [llength [lindex [lindex $ret 0] 0]] 0
+ error_check_good dbc:get:data [llength [lindex [lindex $ret 0] 1]] 0
+
+ # And the item after the cursor should now be
+ # key: $nkeys/2, data: $nkeys/2 + 2
+ set ret [$dbc get -next]
+ error_check_bad dbc:getnext [llength $ret] 0
+ error_check_good dbc:getnext:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 2])
+ error_check_good dbc:getnext:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+
+ puts "\tTest052.c: Delete item after cursor."
+ # should be { keys($nkeys/2), darray($nkeys/2 + 2) }
+ set i [expr $nkeys/2]
+ # deleting data for key after current (key $nkeys/2 + 1)
+ error_check_good db_del [eval {$db del} $txn {$keys([expr $i + 1])}] 0
+
+ # current should be constant
+ set ret [$dbc get -current]
+ error_check_bad dbc:get:current [llength $ret] 0
+ error_check_good dbc:get:keys [lindex [lindex $ret 0] 0] \
+ $keys($i)
+ error_check_good dbc:get:data [lindex [lindex $ret 0] 1] \
+ $darray([expr $i + 2])
+
+ puts "\tTest052: Deletes by cursor."
+ puts "\tTest052.d: Delete, do DB_NEXT."
+ set i 1
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good dbc_get:first [lindex [lindex $ret 0] 1] $darray($i)
+ error_check_good dbc_del [$dbc del] 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current [llength $ret] 0
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good dbc:get:curs \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 1])
+ error_check_good dbc:get:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+
+ # Move one more forward, so we're not on the first item.
+ error_check_bad dbc:getnext [llength [$dbc get -next]] 0
+
+ puts "\tTest052.e: Delete, do DB_PREV."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -current]
+ error_check_good dbc:get:curr [llength $ret] 0
+
+ # next should now reference the record that was previously after
+ # old current
+ set ret [$dbc get -next]
+ error_check_bad get:next [llength $ret] 0
+ error_check_good dbc:get:next:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 3])
+ error_check_good dbc:get:next:keys \
+ [lindex [lindex $ret 0] 0] $keys([expr $i + 1])
+
+
+ set ret [$dbc get -prev]
+ error_check_bad dbc:get:curr [llength $ret] 0
+ error_check_good dbc:get:curr:compare \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 1])
+ error_check_good dbc:get:curr:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+
+ # The rest of the test was written with the old rrecno semantics,
+ # which required a separate c_del(CURRENT) test; to leave
+ # the database in the expected state, we now delete the first item.
+ set ret [$dbc get -first]
+ error_check_bad getfirst [llength $ret] 0
+ error_check_good delfirst [$dbc del] 0
+
+ puts "\tTest052: Inserts."
+ puts "\tTest052.g: Insert before (DB_BEFORE)."
+ set i 1
+ set ret [$dbc get -first]
+ error_check_bad dbc:get:first [llength $ret] 0
+ error_check_good dbc_get:first \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc_get:first:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 3])
+
+ set ret [$dbc put -before $darray($i)]
+ # should return new key, which should be $keys($i)
+ error_check_good dbc_put:before $ret $keys($i)
+ # cursor should adjust to point to new item
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good dbc_put:before:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc_put:before:data \
+ [lindex [lindex $ret 0] 1] $darray($i)
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good dbc_get:next:compare \
+ $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 3])]]
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+
+ puts "\tTest052.h: Insert by cursor after (DB_AFTER)."
+ set i [incr i]
+ set ret [$dbc put -after $darray($i)]
+ # should return new key, which should be $keys($i)
+ error_check_good dbcput:after $ret $keys($i)
+ # cursor should reference new item
+ set ret [$dbc get -current]
+ error_check_good dbc:get:current:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc:get:current:data \
+ [lindex [lindex $ret 0] 1] $darray($i)
+
+ # items after curs should be adjusted
+ set ret [$dbc get -next]
+ error_check_bad dbc:get:next [llength $ret] 0
+ error_check_good dbc:get:next:compare \
+ $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 2])]]
+
+ puts "\tTest052.i: Insert (overwrite) current item (DB_CURRENT)."
+ set i 1
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ # choose a datum that is not currently in db
+ set ret [$dbc put -current $darray([expr $i + 2])]
+ error_check_good dbc_put:curr $ret 0
+ # curs should be on new item
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret] 0
+ error_check_good dbc_get:curr:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc_get:curr:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 2])
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ set i [incr i]
+ error_check_good dbc_get:next \
+ $ret [list [list $keys($i) $darray($i)]]
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest052 complete."
+}
diff --git a/db-4.8.30/test/test053.tcl b/db-4.8.30/test/test053.tcl
new file mode 100644
index 0000000..f75dea3
--- /dev/null
+++ b/db-4.8.30/test/test053.tcl
@@ -0,0 +1,241 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test053
+# TEST Test of the DB_REVSPLITOFF flag in the Btree and Btree-w-recnum
+# TEST methods.
+proc test053 { method args } {
+ global alphabet
+ global errorCode
+ global is_je_test
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+	puts "\tTest053: Test of the DB_REVSPLITOFF flag ($omethod)."
+ if { [is_btree $method] != 1 && [is_rbtree $method] != 1 } {
+ puts "Test053: skipping for method $method."
+ return
+ }
+
+ if { [is_partition_callback $args] == 1 } {
+ puts "Test053: skipping for method $method with partition callback."
+ return
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test053: skipping for specific pagesizes"
+ return
+ }
+
+ set txn ""
+ set flags ""
+
+ puts "\tTest053.a: Create $omethod $args database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test053.db
+ set env NULL
+ } else {
+ set testfile test053.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags \
+ "-create -revsplitoff -pagesize 1024 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 8
+ set npages 15
+
+ # We want to create a db with npages leaf pages, and have each page
+ # be near full with keys that we can predict. We set pagesize above
+ # to 1024 bytes, it should breakdown as follows (per page):
+ #
+ # ~20 bytes overhead
+ # key: ~4 bytes overhead, XXX0N where X is a letter, N is 0-9
+ # data: ~4 bytes overhead, + 100 bytes
+ #
+ # then, with 8 keys/page we should be just under 1024 bytes
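+	# A rough tally with the estimates above and the actual sizes used
+	# below (5-byte keys, 88-byte data): 8 * (4 + 5 + 4 + 88) + 20 is
+	# roughly 830 bytes, comfortably under the 1024-byte pagesize.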
+ puts "\tTest053.b: Create $npages pages with $nkeys pairs on each."
+ set keystring [string range $alphabet 0 [expr $npages -1]]
+ set data [repeat DATA 22]
+ for { set i 0 } { $i < $npages } {incr i } {
+ set key ""
+ set keyroot \
+ [repeat [string toupper [string range $keystring $i $i]] 3]
+ set key_set($i) $keyroot
+ for {set j 0} { $j < $nkeys} {incr j} {
+ if { $j < 10 } {
+ set key [set keyroot]0$j
+ } else {
+ set key $keyroot$j
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+# We really should not skip this test for partitioned dbs; we need to
+# calculate how many pages there should be, which is tricky if we
+# don't know where the keys are going to fall. If they are all
+# in one partition then we can subtract the extra leaf pages
+# in the extra partitions. The test further on should at least
+# check that the number of pages is the same as what is found here.
+ if { !$is_je_test && ![is_substr $args "-partition"] } {
+ puts "\tTest053.c: Check page count."
+ error_check_good page_count:check \
+ [is_substr [$db stat] "{Leaf pages} $npages"] 1
+ }
+
+ puts "\tTest053.d: Delete all but one key per page."
+ for {set i 0} { $i < $npages } {incr i } {
+ for {set j 1} { $j < $nkeys } {incr j } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db del} $txn {$key_set($i)0$j}]
+ error_check_good dbdel $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+ if { !$is_je_test && ![is_substr $args "-partition"] } {
+ puts "\tTest053.e: Check to make sure all pages are still there."
+ error_check_good page_count:check \
+ [is_substr [$db stat] "{Leaf pages} $npages"] 1
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db:cursor [is_valid_cursor $dbc $db] TRUE
+
+ # walk cursor through tree forward, backward.
+ # delete one key, repeat
+ for {set i 0} { $i < $npages} {incr i} {
+ puts -nonewline \
+ "\tTest053.f.$i: Walk curs through tree: forward..."
+ for { set j $i; set curr [$dbc get -first]} { $j < $npages} { \
+ incr j; set curr [$dbc get -next]} {
+ error_check_bad dbc:get:next [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ puts -nonewline "backward..."
+ for { set j [expr $npages - 1]; set curr [$dbc get -last]} { \
+ $j >= $i } { \
+ set j [incr j -1]; set curr [$dbc get -prev]} {
+ error_check_bad dbc:get:prev [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ puts "complete."
+
+ if { [is_rbtree $method] == 1} {
+ puts "\t\tTest053.f.$i:\
+ Walk through tree with record numbers."
+ for {set j 1} {$j <= [expr $npages - $i]} {incr j} {
+ set curr [eval {$db get} $txn {-recno $j}]
+ error_check_bad \
+ db_get:recno:$j [llength $curr] 0
+ error_check_good db_get:recno:keys:$j \
+ [lindex [lindex $curr 0] 0] \
+ $key_set([expr $j + $i - 1])00
+ }
+ }
+ puts "\tTest053.g.$i:\
+ Delete single key ([expr $npages - $i] keys left)."
+ set ret [eval {$db del} $txn {$key_set($i)00}]
+ error_check_good dbdel $ret 0
+ error_check_good del:check \
+ [llength [eval {$db get} $txn {$key_set($i)00}]] 0
+ }
+
+ # end for loop, verify db_notfound
+ set ret [$dbc get -first]
+ error_check_good dbc:get:verify [llength $ret] 0
+
+ # loop: until single key restored on each page
+ for {set i 0} { $i < $npages} {incr i} {
+ puts "\tTest053.i.$i:\
+ Restore single key ([expr $i + 1] keys in tree)."
+ set ret [eval {$db put} $txn {$key_set($i)00 $data}]
+ error_check_good dbput $ret 0
+
+ puts -nonewline \
+ "\tTest053.j: Walk cursor through tree: forward..."
+ for { set j 0; set curr [$dbc get -first]} { $j <= $i} {\
+ incr j; set curr [$dbc get -next]} {
+ error_check_bad dbc:get:next [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ error_check_good dbc:get:next [llength $curr] 0
+
+ puts -nonewline "backward..."
+ for { set j $i; set curr [$dbc get -last]} { \
+ $j >= 0 } { \
+ set j [incr j -1]; set curr [$dbc get -prev]} {
+ error_check_bad dbc:get:prev [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ puts "complete."
+ error_check_good dbc:get:prev [llength $curr] 0
+
+ if { [is_rbtree $method] == 1} {
+ puts "\t\tTest053.k.$i:\
+ Walk through tree with record numbers."
+ for {set j 1} {$j <= [expr $i + 1]} {incr j} {
+ set curr [eval {$db get} $txn {-recno $j}]
+ error_check_bad \
+ db_get:recno:$j [llength $curr] 0
+ error_check_good db_get:recno:keys:$j \
+ [lindex [lindex $curr 0] 0] \
+ $key_set([expr $j - 1])00
+ }
+ }
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "Test053 complete."
+}
diff --git a/db-4.8.30/test/test054.tcl b/db-4.8.30/test/test054.tcl
new file mode 100644
index 0000000..69bb635
--- /dev/null
+++ b/db-4.8.30/test/test054.tcl
@@ -0,0 +1,459 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test054
+# TEST Cursor maintenance during key/data deletion.
+# TEST
+# TEST This test checks for cursor maintenance in the presence of deletes.
+# TEST There are N different scenarios to test:
+# TEST 1. No duplicates. Cursor A deletes a key, do a GET for the key.
+# TEST 2. No duplicates. Cursor is positioned right before key K, Delete K,
+# TEST do a next on the cursor.
+# TEST 3. No duplicates. Cursor is positioned on key K, do a regular delete
+# TEST of K, do a current get on K.
+# TEST 4. Repeat 3 but do a next instead of current.
+# TEST 5. Duplicates. Cursor A is on the first item of a duplicate set, A
+# TEST does a delete. Then we do a non-cursor get.
+# TEST 6. Duplicates. Cursor A is in a duplicate set and deletes the item.
+# TEST Do a delete of the entire key. Test cursor current.
+# TEST 7. Continue last test and try cursor next.
+# TEST 8. Duplicates. Cursor A is in a duplicate set and deletes the item.
+# TEST Cursor B is in the same duplicate set and deletes a different item.
+# TEST Verify that the cursor is in the right place.
+# TEST 9. Cursors A and B are in the same place in the same duplicate set. A
+# TEST deletes its item. Do current on B.
+# TEST 10. Continue 8 and do a next on B.
+proc test054 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -mode 0644"
+ puts "Test054 ($method $args):\
+ interspersed cursor and normal operations"
+ if { [is_record_based $method] == 1 } {
+ puts "Test054 skipping for method $method"
+ return
+ }
+
+ # Find the environment in the argument list, we'll need it
+ # later.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ }
+
+ # Create the database and open the dictionary
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test054-nodup.db
+ set env NULL
+ } else {
+ set testfile test054-nodup.db
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ puts "\tTest054.a: No Duplicate Tests"
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:nodup [is_valid_db $db] TRUE
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
+ # Retrieve keys sequentially so we can figure out their order
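+	# The stored order may differ from insertion order for btree/hash,
+	# so key_set() records the order actually found in the db; the
+	# scenarios below refer to keys by that order.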
+ set i 1
+ for {set d [$curs get -first] } \
+ {[llength $d] != 0 } \
+ {set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Test case #1.
+ puts "\tTest054.a1: Delete w/cursor, regular get"
+
+	# Now set the cursor on the middle one.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do the delete
+ set r [$curs del]
+ error_check_good curs_del $r 0
+
+ # Now do the get
+ set r [eval {$db get} $txn {$key_set(2)}]
+ error_check_good get_after_del [llength $r] 0
+
+ # Free up the cursor.
+ error_check_good cursor_close [eval {$curs close}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Test case #2.
+ puts "\tTest054.a2: Cursor before K, delete K, cursor next"
+
+ # Replace key 2
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Open and position cursor on first item.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ for {set d [eval {$curs get} -first] } \
+ {[llength $d] != 0 } \
+ {set d [$curs get -nextdup] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ set r [eval {$curs get} -set {$key_set(1)} ]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Now delete (next item) $key_set(2)
+ error_check_good \
+ db_del:$key_set(2) [eval {$db del} $txn {$key_set(2)}] 0
+
+ # Now do next on cursor
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ # Test case #3.
+ puts "\tTest054.a3: Cursor on K, delete K, cursor current"
+
+ # delete item 3
+ error_check_good \
+ db_del:$key_set(3) [eval {$db del} $txn {$key_set(3)}] 0
+ # NEEDS TO COME BACK IN, BUG CHECK
+ set ret [$curs get -current]
+ error_check_good current_after_del $ret ""
+ error_check_good cursor_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest054.a4: Cursor on K, delete K, cursor next"
+
+ # Restore keys 2 and 3
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
+ error_check_good put $r 0
+ set r [eval {$db put} $txn {$key_set(3) datum$key_set(3)}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # Create the new cursor and put it on 1
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+ set r [$curs get -set $key_set(1)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Delete 2
+ error_check_good \
+ db_del:$key_set(2) [eval {$db del} $txn {$key_set(2)}] 0
+
+ # Now do next on cursor
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ # Close cursor
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now get ready for duplicate tests
+
+ if { [is_rbtree $method] == 1 || [is_compressed $args] == 1 } {
+ puts "Test054: skipping remainder of test for method $method."
+ return
+ }
+
+ puts "\tTest054.b: Duplicate Tests"
+ append args " -dup"
+
+ # Open a new database for the dup tests so -truncate is not needed.
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test054-dup.db
+ set env NULL
+ } else {
+ set testfile test054-dup.db
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:dup [is_valid_db $db] TRUE
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
+ set i 1
+	for {set d [$curs get -first] } \
+	    {[llength $d] != 0 } \
+	    {set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
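+	# With unsorted duplicates the data items for key 2 are now stored in
+	# insertion order: datum$key_set(2), dup_1, dup_2, ..., dup_5.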
+
+ # Test case #5.
+ puts "\tTest054.b1: Delete dup w/cursor on first item. Get on key."
+
+ # Now set the cursor on the first of the duplicate set.
+ set r [eval {$curs get} -set {$key_set(2)}]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do the delete
+ set r [$curs del]
+ error_check_good curs_del $r 0
+
+	# Now do a plain db get; the original datum was just deleted through
+	# the cursor, so the first remaining item in the dup set is dup_1.
+ set r [eval {$db get} $txn {$key_set(2)}]
+ error_check_good get_after_del [lindex [lindex $r 0] 1] dup_1
+
+ # Test case #6.
+ puts "\tTest054.b2: Now get the next duplicate from the cursor."
+
+ # Now do next on cursor
+ set r [$curs get -nextdup]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+	# Test case #7.
+ puts "\tTest054.b3: Two cursors in set; each delete different items"
+
+ # Open a new cursor.
+ set curs2 [eval {$db cursor} $txn]
+ error_check_good curs_open [is_valid_cursor $curs2 $db] TRUE
+
+ # Set on last of duplicate set.
+ set r [$curs2 get -set $key_set(3)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(3)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(3)
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_5
+
+ # Delete the item at cursor 1 (dup_1)
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify curs1 and curs2
+ # current should fail
+ set ret [$curs get -current]
+ error_check_good curs1_get_after_del $ret ""
+
+ set r [$curs2 get -current]
+ error_check_bad curs2_get [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_CURRENT:key $k $key_set(2)
+ error_check_good curs_get:DB_CURRENT:data $d dup_5
+
+ # Now delete the item at cursor 2 (dup_5)
+ error_check_good curs2_del [$curs2 del] 0
+
+ # Verify curs1 and curs2
+ set ret [$curs get -current]
+ error_check_good curs1_get:del2 $ret ""
+
+ set ret [$curs2 get -current]
+ error_check_good curs2_get:del2 $ret ""
+
+ # Now verify that next and prev work.
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_4
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_2
+
+ puts "\tTest054.b4: Two cursors same item, one delete, one get"
+
+ # Move curs2 onto dup_2
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_3
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_2
+
+ # delete on curs 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ set ret [$curs get -current]
+ error_check_good \
+ curs1_get:deleted $ret ""
+ set ret [$curs2 get -current]
+ error_check_good \
+ curs2_get:deleted $ret ""
+
+ puts "\tTest054.b5: Now do a next on both cursors"
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_3
+
+ set r [$curs2 get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_3
+
+ # Close cursor
+ error_check_good curs_close [$curs close] 0
+ error_check_good curs2_close [$curs2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test055.tcl b/db-4.8.30/test/test055.tcl
new file mode 100644
index 0000000..179cead
--- /dev/null
+++ b/db-4.8.30/test/test055.tcl
@@ -0,0 +1,140 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test055
+# TEST Basic cursor operations.
+# TEST This test checks basic cursor operations.
+# TEST	There are N different scenarios to test:
+# TEST 1. (no dups) Set cursor, retrieve current.
+# TEST 2. (no dups) Set cursor, retrieve next.
+# TEST 3. (no dups) Set cursor, retrieve prev.
+proc test055 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test055: $method interspersed cursor and normal operations"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test055.db
+ set env NULL
+ } else {
+ set testfile test055.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ puts "\tTest055.a: No duplicates"
+ set db [eval {berkdb_open -create -mode 0644 $omethod } \
+ $args {$testfile}]
+ error_check_good db_open:nodup [is_valid_db $db] TRUE
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } { [llength $d] != 0 } {\
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Test case #1.
+ puts "\tTest055.a1: Set cursor, retrieve current"
+
+	# Now set the cursor on the middle one.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_SET:data $d [pad_data $method datum$key_set(2)]
+
+ # Now retrieve current
+ set r [$curs get -current]
+ error_check_bad cursor_get:DB_CURRENT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_CURRENT:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_CURRENT:data $d [pad_data $method datum$key_set(2)]
+
+ # Test case #2.
+ puts "\tTest055.a2: Set cursor, retrieve previous"
+ set r [$curs get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(1)
+ error_check_good \
+ curs_get:DB_PREV:data $d [pad_data $method datum$key_set(1)]
+
+ # Test case #3.
+	puts "\tTest055.a3: Set cursor, retrieve next"
+
+ # Now set the cursor on the middle one.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_SET:data $d [pad_data $method datum$key_set(2)]
+
+ # Now retrieve next
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good \
+ curs_get:DB_NEXT:data $d [pad_data $method datum$key_set(3)]
+
+ # Close cursor and database.
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test056.tcl b/db-4.8.30/test/test056.tcl
new file mode 100644
index 0000000..0fd93c9
--- /dev/null
+++ b/db-4.8.30/test/test056.tcl
@@ -0,0 +1,174 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test056
+# TEST Cursor maintenance during deletes.
+# TEST Check if deleting a key when a cursor is on a duplicate of that
+# TEST key works.
+proc test056 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -mode 0644 -dup "
+ if { [is_record_based $method] == 1 || [is_rbtree $method] } {
+ puts "Test056: skipping for method $method"
+ return
+ }
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test056 skipping for btree with compression."
+ return
+ }
+
+ puts "Test056: $method delete of key in presence of cursor"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test056.db
+ set env NULL
+ } else {
+ set testfile test056.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:dup [is_valid_db $db] TRUE
+
+ puts "\tTest056.a: Key delete with cursor on duplicate."
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } { [llength $d] != 0 } {
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
+
+ # Now put the cursor on a duplicate of key 2
+
+ # Now set the cursor on the first of the duplicate set.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do two nexts
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_2
+
+ # Now do the delete
+ set r [eval {$db del} $txn $flags {$key_set(2)}]
+ error_check_good delete $r 0
+
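+	# The db-handle delete removed the entire duplicate set, so the item
+	# the cursor was sitting on no longer exists.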
+ # Now check the get current on the cursor.
+ set ret [$curs get -current]
+ error_check_good curs_after_del $ret ""
+
+ # Now check that the rest of the database looks intact. There
+ # should be only two keys, 1 and 3.
+
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(1)
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ set r [$curs get -next]
+ error_check_good cursor_get:DB_NEXT [llength $r] 0
+
+ puts "\tTest056.b:\
+ Cursor delete of first item, followed by cursor FIRST"
+ # Set to beginning
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(1)
+
+ # Now do delete
+ error_check_good curs_del [$curs del] 0
+
+ # Now do DB_FIRST
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(3)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(3)
+
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test057.tcl b/db-4.8.30/test/test057.tcl
new file mode 100644
index 0000000..e70dd48
--- /dev/null
+++ b/db-4.8.30/test/test057.tcl
@@ -0,0 +1,207 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test057
+# TEST Cursor maintenance during key deletes.
+# TEST 1. Delete a key with a cursor. Add the key back with a regular
+# TEST put. Make sure the cursor can't get the new item.
+# TEST 2. Put two cursors on one item. Delete through one cursor,
+# TEST check that the other sees the change.
+# TEST 3. Same as 2, with the two cursors on a duplicate.
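+#
+# Minimal sketch for case 1 (hedged): a cursor delete leaves the cursor on the
+# emptied slot; re-adding the same key with a db-handle put does not reposition
+# the cursor, so "$curs get -current" keeps returning an empty list until the
+# cursor is moved.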
+
+proc test057 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -mode 0644 -dup "
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test057: skipping for method $method"
+ return
+ }
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test057 skipping for btree with compression."
+ return
+ }
+
+ puts "Test057: $method delete and replace in presence of cursor."
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test057.db
+ set env NULL
+ } else {
+ set testfile test057.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good dbopen:dup [is_valid_db $db] TRUE
+
+ puts "\tTest057.a: Set cursor, delete cursor, put with key."
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } {[llength $d] != 0 } \
+ {set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
+
+ # Now put the cursor on key 1
+
+	# Set the cursor on key 1 (which has no duplicates).
+ set r [$curs get -set $key_set(1)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Now do the delete
+ set r [$curs del]
+ error_check_good delete $r 0
+
+ # Now check the get current on the cursor.
+ error_check_good curs_get:del [$curs get -current] ""
+
+ # Now do a put on the key
+ set r [eval {$db put} $txn $flags {$key_set(1) new_datum$key_set(1)}]
+ error_check_good put $r 0
+
+ # Do a get
+ set r [eval {$db get} $txn {$key_set(1)}]
+ error_check_good get [lindex [lindex $r 0] 1] new_datum$key_set(1)
+
+ # Recheck cursor
+ error_check_good curs_get:deleted [$curs get -current] ""
+
+ # Move cursor and see if we get the key.
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d new_datum$key_set(1)
+
+	puts "\tTest057.b: Set two cursors on a key, delete through one, check the other"
+ set curs2 [eval {$db cursor} $txn]
+ error_check_good curs2_open [is_valid_cursor $curs2 $db] TRUE
+
+	# Set both cursors on the third key.
+ set r [$curs get -set $key_set(3)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(3)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(3)
+
+ set r [$curs2 get -set $key_set(3)]
+ error_check_bad cursor2_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_SET:key $k $key_set(3)
+ error_check_good curs2_get:DB_SET:data $d datum$key_set(3)
+
+ # Now delete through cursor 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ error_check_good curs_get:deleted [$curs get -current] ""
+ error_check_good curs_get:deleted [$curs2 get -current] ""
+
+	puts "\tTest057.c:\
+	    Set two cursors on a dup, delete through one, check the other"
+
+ # Set both cursors on the 2nd duplicate of key 2
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+ set r [$curs2 get -set $key_set(2)]
+ error_check_bad cursor2_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_SET:key $k $key_set(2)
+ error_check_good curs2_get:DB_SET:data $d datum$key_set(2)
+
+ set r [$curs2 get -next]
+ error_check_bad cursor2_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs2_get:DB_NEXT:data $d dup_1
+
+ # Now delete through cursor 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ error_check_good curs_get:deleted [$curs get -current] ""
+ error_check_good curs_get:deleted [$curs2 get -current] ""
+
+ error_check_good curs2_close [$curs2 close] 0
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test058.tcl b/db-4.8.30/test/test058.tcl
new file mode 100644
index 0000000..b949836
--- /dev/null
+++ b/db-4.8.30/test/test058.tcl
@@ -0,0 +1,110 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test058
+# TEST Verify that deleting and reading duplicates results in correct ordering.
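+#
+# Minimal sketch of the scenario (hedged; only calls used elsewhere in this
+# suite): unsorted duplicates must come back in insertion order even after the
+# whole dup set is deleted and re-added:
+#	$db put -txn $t doghouse <dup_i>          ;# i = 0..9
+#	$db del -txn $t doghouse                  ;# drops the entire dup set
+#	$db put -txn $t doghouse <padded dup_i>   ;# re-add with larger data
+#	$dbc get -set doghouse / -nextdup         ;# must return i = 0,1,2,... in order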
+proc test058 { method args } {
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test058 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+ set pageargs ""
+ split_pageargs $args pageargs
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test058 skipping for btree with compression."
+ return
+ }
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test058: skipping for method $method"
+ return
+ }
+ puts "Test058: $method delete dups after inserting after duped key."
+
+ # environment
+ env_cleanup $testdir
+ set eflags "-create -txn $encargs -home $testdir"
+ set env [eval {berkdb_env} $eflags $pageargs]
+ error_check_good env [is_valid_env $env] TRUE
+
+ # db open
+ set flags "-auto_commit -create -mode 0644 -dup -env $env $args"
+ set db [eval {berkdb_open} $flags $omethod "test058.db"]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set tn ""
+ set tid ""
+ set tn [$env txn]
+ set tflags "-txn $tn"
+
+ puts "\tTest058.a: Adding 10 duplicates"
+ # Add a bunch of dups
+ for { set i 0 } { $i < 10 } {incr i} {
+ set ret \
+ [eval {$db put} $tflags {doghouse $i"DUPLICATE_DATA_VALUE"}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest058.b: Adding key after duplicates"
+ # Now add one more key/data AFTER the dup set.
+ set ret [eval {$db put} $tflags {zebrahouse NOT_A_DUP}]
+ error_check_good db_put $ret 0
+
+ error_check_good txn_commit [$tn commit] 0
+
+ set tn [$env txn]
+ error_check_good txnbegin [is_substr $tn $env] 1
+ set tflags "-txn $tn"
+
+ # Now delete everything
+ puts "\tTest058.c: Deleting duplicated key"
+ set ret [eval {$db del} $tflags {doghouse}]
+ error_check_good del $ret 0
+
+ # Now reput everything
+ set pad \
+ abcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuv
+
+ puts "\tTest058.d: Reputting duplicates with big data vals"
+ for { set i 0 } { $i < 10 } {incr i} {
+ set ret [eval {$db put} \
+ $tflags {doghouse $i"DUPLICATE_DATA_VALUE"$pad}]
+ error_check_good db_put $ret 0
+ }
+ error_check_good txn_commit [$tn commit] 0
+
+ # Check duplicates for order
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ puts "\tTest058.e: Verifying that duplicates are in order."
+ set i 0
+ for { set ret [$dbc get -set doghouse] } \
+ {$i < 10 && [llength $ret] != 0} \
+ { set ret [$dbc get -nextdup] } {
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good \
+ duplicate_value $data $i"DUPLICATE_DATA_VALUE"$pad
+ incr i
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ reset_env $env
+}
diff --git a/db-4.8.30/test/test059.tcl b/db-4.8.30/test/test059.tcl
new file mode 100644
index 0000000..51b88d9
--- /dev/null
+++ b/db-4.8.30/test/test059.tcl
@@ -0,0 +1,149 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test059
+# TEST Cursor ops work with a partial length of 0.
+# TEST Make sure that we handle retrieves of zero-length data items correctly.
+# TEST	The following ops should allow a partial data retrieve of 0-length.
+# TEST db_get
+# TEST db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
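+#
+# Minimal sketch of a 0-length partial retrieve (hedged): the key is returned
+# and the data item comes back empty:
+#	$db get -partial {0 0} $key
+#	$curs get -first -partial {0 0}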
+proc test059 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test059: $method 0-length partial data retrieval"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test059.db
+ set env NULL
+ } else {
+ set testfile test059.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest059.a: Populate a database"
+ set oflags "-create -mode 0644 $omethod $args $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_create [is_substr $db db] 1
+
+ # Put ten keys in the database
+ for { set key 1 } { $key <= 10 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $pflags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good db_curs [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } { [llength $d] != 0 } {
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ puts "\tTest059.a: db get with 0 partial length retrieve"
+
+	# Do a get on the middle key with a 0-length partial retrieve.
+ set ret [eval {$db get -partial {0 0}} $txn $gflags {$key_set(5)}]
+ error_check_bad db_get_0 [llength $ret] 0
+
+ puts "\tTest059.a: db cget FIRST with 0 partial length retrieve"
+ set ret [$curs get -first -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_first $key $key_set(1)
+ error_check_good db_cget_first [string length $data] 0
+
+ puts "\tTest059.b: db cget NEXT with 0 partial length retrieve"
+ set ret [$curs get -next -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_next $key $key_set(2)
+ error_check_good db_cget_next [string length $data] 0
+
+ puts "\tTest059.c: db cget LAST with 0 partial length retrieve"
+ set ret [$curs get -last -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_last $key $key_set(10)
+ error_check_good db_cget_last [string length $data] 0
+
+ puts "\tTest059.d: db cget PREV with 0 partial length retrieve"
+ set ret [$curs get -prev -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_prev $key $key_set(9)
+ error_check_good db_cget_prev [string length $data] 0
+
+ puts "\tTest059.e: db cget CURRENT with 0 partial length retrieve"
+ set ret [$curs get -current -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_current $key $key_set(9)
+ error_check_good db_cget_current [string length $data] 0
+
+ puts "\tTest059.f: db cget SET with 0 partial length retrieve"
+ set ret [$curs get -set -partial {0 0} $key_set(7)]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_set $key $key_set(7)
+ error_check_good db_cget_set [string length $data] 0
+
+ if {[is_btree $method] == 1} {
+ puts "\tTest059.g:\
+ db cget SET_RANGE with 0 partial length retrieve"
+ set ret [$curs get -set_range -partial {0 0} $key_set(5)]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_set $key $key_set(5)
+ error_check_good db_cget_set [string length $data] 0
+ }
+
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test060.tcl b/db-4.8.30/test/test060.tcl
new file mode 100644
index 0000000..bb6ca8d
--- /dev/null
+++ b/db-4.8.30/test/test060.tcl
@@ -0,0 +1,59 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test060
+# TEST Test of the DB_EXCL flag to DB->open().
+# TEST 1) Attempt to open and create a nonexistent database; verify success.
+# TEST 2) Attempt to reopen it; verify failure.
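+#
+# Minimal sketch of DB_EXCL (hedged):
+#	berkdb_open -create -excl -mode 0644 $omethod $testfile   ;# ok: file is new
+#	berkdb_open -create -excl -mode 0644 $omethod $testfile   ;# fails: EEXIST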
+proc test060 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test060: $method ($args) Test of the DB_EXCL flag to DB->open"
+
+ # Set the database location and make sure the db doesn't exist yet
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test060.db
+ set env NULL
+ } else {
+ set testfile test060.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Create the database and check success
+ puts "\tTest060.a: open and close non-existent file with DB_EXCL"
+ set db [eval {berkdb_open \
+ -create -excl -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen:excl [is_valid_db $db] TRUE
+
+ # Close it and check success
+ error_check_good db_close [$db close] 0
+
+ # Try to open it again, and make sure the open fails
+ puts "\tTest060.b: open it again with DB_EXCL and make sure it fails"
+ set errorCode NONE
+ error_check_good open:excl:catch [catch { \
+ set db [eval {berkdb_open_noerr \
+ -create -excl -mode 0644} $args {$omethod $testfile}]
+ } ret ] 1
+
+ error_check_good dbopen:excl [is_substr $errorCode EEXIST] 1
+}
diff --git a/db-4.8.30/test/test061.tcl b/db-4.8.30/test/test061.tcl
new file mode 100644
index 0000000..a352f22
--- /dev/null
+++ b/db-4.8.30/test/test061.tcl
@@ -0,0 +1,231 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test061
+# TEST Test of txn abort and commit for in-memory databases.
+# TEST a) Put + abort: verify absence of data
+# TEST b) Put + commit: verify presence of data
+# TEST c) Overwrite + abort: verify that data is unchanged
+# TEST d) Overwrite + commit: verify that data has changed
+# TEST e) Delete + abort: verify that data is still present
+# TEST f) Delete + commit: verify that data has been deleted
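+#
+# Minimal sketch of the pattern used for each case below (hedged):
+#	set txn [$dbenv txn]
+#	$db put -txn $txn $key $data
+#	$txn abort                    ;# a later "$db get $key" returns {}
+# whereas after "$txn commit" the same get returns the key/data pair.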
+proc test061 { method args } {
+ global alphabet
+ global encrypt
+ global errorCode
+ global passwd
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test061 skipping for env $env"
+ return
+ }
+ if { [is_partitioned $args] == 1 } {
+ puts "Test061 skipping for partitioned $method"
+ return
+ }
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_queueext $method] == 1} {
+ puts "Test061 skipping for method $method"
+ return
+ }
+ puts "Test061: Transaction abort and commit test for in-memory data."
+ puts "Test061: $method $args"
+
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set pageargs ""
+ split_pageargs $args pageargs
+
+ set key "key"
+ set data "data"
+ set otherdata "otherdata"
+ set txn ""
+ set flags ""
+ set gflags ""
+
+ if { [is_record_based $method] == 1} {
+ set key 1
+ set gflags " -recno"
+ }
+
+ puts "\tTest061: Create environment and $method database."
+ env_cleanup $testdir
+
+ # create environment
+ set eflags "-create -txn $encargs -home $testdir"
+ set dbenv [eval {berkdb_env} $eflags $pageargs ]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # db open -- no file specified, in-memory database
+ set flags "-auto_commit -create $args $omethod"
+ set db [eval {berkdb_open -env} $dbenv $flags]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here we go with the six test cases. Since we need to verify
+ # a different thing each time, and since we can't just reuse
+ # the same data if we're to test overwrite, we just
+ # plow through rather than writing some impenetrable loop code;
+ # each of the cases is only a few lines long, anyway.
+
+ puts "\tTest061.a: put/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # put a key
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check for *non*-existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret {}
+
+ puts "\tTest061.b: put/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # put a key
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check again for existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest061.c: overwrite/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # overwrite {key,data} with {key,otherdata}
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $otherdata]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check that data is unchanged ($data not $otherdata)
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest061.d: overwrite/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # overwrite {key,data} with {key,otherdata}
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $otherdata]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check that data has changed ($otherdata not $data)
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ puts "\tTest061.e: delete/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # delete
+ set ret [eval {$db del} -txn $txn {$key}]
+	error_check_good db_del $ret 0
+
+ # check for nonexistence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret {}
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check for existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ puts "\tTest061.f: delete/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+	# delete the key
+	set ret [eval {$db del} -txn $txn {$key}]
+	error_check_good db_del $ret 0
+
+ # check for nonexistence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret {}
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check for continued nonexistence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret {}
+
+ # We're done; clean up.
+ error_check_good db_close [eval {$db close}] 0
+ error_check_good env_close [eval {$dbenv close}] 0
+
+ # Now run db_recover and ensure that it runs cleanly.
+ set utilflag ""
+ if { $encrypt != 0 } {
+ set utilflag "-P $passwd"
+ }
+ puts "\tTest061.g: Running db_recover -h"
+ set ret [catch {eval {exec} $util_path/db_recover -h $testdir \
+ $utilflag} res]
+ if { $ret != 0 } {
+		puts "FAIL: db_recover output $res"
+ }
+ error_check_good db_recover $ret 0
+
+ puts "\tTest061.h: Running db_recover -c -h"
+ set ret [catch {eval {exec} $util_path/db_recover -c -h $testdir \
+ $utilflag} res]
+ error_check_good db_recover-c $ret 0
+}
diff --git a/db-4.8.30/test/test062.tcl b/db-4.8.30/test/test062.tcl
new file mode 100644
index 0000000..3878ffd
--- /dev/null
+++ b/db-4.8.30/test/test062.tcl
@@ -0,0 +1,159 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test062
+# TEST Test of partial puts (using DB_CURRENT) onto duplicate pages.
+# TEST Insert the first 200 words into the dictionary 200 times each with
+# TEST self as key and <random letter>:self as data. Use partial puts to
+# TEST append self again to data; verify correctness.
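+#
+# Minimal sketch of the partial put used below (hedged; $L and $extra are
+# placeholder names): with the cursor on a datum of length L, replacing 0
+# bytes at offset L appends the new bytes to the existing data:
+#	$dbc put -current -partial [list $L 0] $extra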
+proc test062 { method {nentries 200} {ndups 200} {tnum "062"} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test$tnum skipping for method $omethod"
+ return
+ }
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test$tnum skipping for btree with compression."
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 200 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "Test$tnum:\
+ $method ($args) $nentries Partial puts and $ndups duplicates."
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup} $args {$testfile} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest$tnum.a: Put loop (initialize database)"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set datastr $pref:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ set keys($count) $str
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest$tnum.b: Partial puts."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_substr $dbc $db] 1
+
+ # Do a partial write to extend each datum in
+ # the regular db by the corresponding dictionary word.
+ # We have to go through each key's dup set using -set
+ # because cursors are not stable in the hash AM and we
+ # want to make sure we hit all the keys.
+ for { set i 0 } { $i < $count } { incr i } {
+ set key $keys($i)
+ for {set ret [$dbc get -set $key]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup]} {
+
+ set k [lindex [lindex $ret 0] 0]
+ set orig_d [lindex [lindex $ret 0] 1]
+ set d [string range $orig_d 2 end]
+ set doff [expr [string length $d] + 2]
+ set dlen 0
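+			# doff is the length of the existing datum and dlen is
+			# 0, so the -partial put below appends $d to the datum.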
+ error_check_good data_and_key_sanity $d $k
+
+ set ret [$dbc get -current]
+ error_check_good before_sanity \
+ [lindex [lindex $ret 0] 0] \
+ [string range [lindex [lindex $ret 0] 1] 2 end]
+
+ error_check_good partial_put [eval {$dbc put -current \
+ -partial [list $doff $dlen] $d}] 0
+
+ set ret [$dbc get -current]
+ error_check_good partial_put_correct \
+ [lindex [lindex $ret 0] 1] $orig_d$d
+ }
+ }
+
+ puts "\tTest$tnum.c: Double-checking get loop."
+ # Double-check that each datum in the regular db has
+ # been appropriately modified.
+
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next]} {
+
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good modification_correct \
+ [string range $d 2 end] [repeat $k 2]
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test063.tcl b/db-4.8.30/test/test063.tcl
new file mode 100644
index 0000000..74ea8c6
--- /dev/null
+++ b/db-4.8.30/test/test063.tcl
@@ -0,0 +1,173 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test063
+# TEST Test of the DB_RDONLY flag to DB->open
+# TEST Attempt to both DB->put and DBC->c_put into a database
+# TEST that has been opened DB_RDONLY, and check for failure.
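+#
+# Minimal sketch of the DB_RDONLY behavior checked here (hedged):
+#	set db [berkdb_open -rdonly $testfile]
+#	$db get $key                          ;# read succeeds
+#	catch {$db put $key2 $data}           ;# fails; errorCode contains EACCES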
+proc test063 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum "063"
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set key "key"
+ set data "data"
+ set key2 "another_key"
+ set data2 "more_data"
+
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set key "1"
+ set key2 "2"
+ append gflags " -recno"
+ }
+
+ puts "Test$tnum: $method ($args) DB_RDONLY test."
+
+ # Create a test database.
+ puts "\tTest$tnum.a: Creating test database."
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_create [is_valid_db $db] TRUE
+
+ # Put and get an item so it's nonempty.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
+ error_check_good initial_put $ret 0
+
+ set dbt [eval {$db get} $txn $gflags {$key}]
+ error_check_good initial_get $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { $eindex == -1 } {
+ # Confirm that database is writable. If we are
+ # using an env (that may be remote on a server)
+ # we cannot do this check.
+ error_check_good writable [file writable $testfile] 1
+ }
+
+ puts "\tTest$tnum.b: Re-opening DB_RDONLY and attempting to put."
+
+ # Now open it read-only and make sure we can get but not put.
+ set db [eval {berkdb_open_noerr -rdonly} $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbt [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ set ret [catch {eval {$db put} $txn \
+ {$key2 [chop_data $method $data]}} res]
+ error_check_good put_failed $ret 1
+ error_check_good db_put_rdonly [is_substr $errorCode "EACCES"] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set errorCode "NONE"
+
+ puts "\tTest$tnum.c: Attempting cursor put."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good cursor_set [$dbc get -first] $dbt
+ set ret [catch {eval {$dbc put} -current $data} res]
+ error_check_good c_put_failed $ret 1
+ error_check_good dbc_put_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt [eval {$db get} $gflags {$key2}]
+ error_check_good db_get_key2 $dbt ""
+
+ puts "\tTest$tnum.d: Attempting ordinary delete."
+
+ set errorCode "NONE"
+	set ret [catch {eval {$db del} $txn {$key}} res]
+ error_check_good del_failed $ret 1
+ error_check_good db_del_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get_key $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest$tnum.e: Attempting cursor delete."
+ # Just set the cursor to the beginning; we don't care what's there...
+ # yet.
+ set dbt2 [$dbc get -first]
+ error_check_good db_get_first_key $dbt2 $dbt
+ set errorCode "NONE"
+ set ret [catch {$dbc del} res]
+ error_check_good c_del_failed $ret 1
+ error_check_good dbc_del_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt2 [$dbc get -current]
+ error_check_good db_get_key $dbt2 $dbt
+
+ puts "\tTest$tnum.f: Close, reopen db; verify unchanged."
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $omethod $args $testfile]
+ error_check_good db_reopen [is_valid_db $db] TRUE
+
+ set dbc [$db cursor]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good first_there [$dbc get -first] \
+ [list [list $key [pad_data $method $data]]]
+ error_check_good nomore_there [$dbc get -next] ""
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test064.tcl b/db-4.8.30/test/test064.tcl
new file mode 100644
index 0000000..c2bbf4d
--- /dev/null
+++ b/db-4.8.30/test/test064.tcl
@@ -0,0 +1,68 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test064
+# TEST Test of DB->get_type
+# TEST Create a database of type specified by method.
+# TEST Make sure DB->get_type returns the right thing with both a normal
+# TEST and DB_UNKNOWN open.
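+#
+# Minimal sketch (hedged): get_type reports the stored access method, whether
+# the handle was opened with an explicit method or with none (DB_UNKNOWN):
+#	set db [berkdb_open -btree $testfile]   ;# or: berkdb_open $testfile
+#	$db get_type                            ;# returns "btree"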
+proc test064 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum "064"
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "Test$tnum: $method ($args) DB->get_type test."
+
+ # Create a test database.
+ puts "\tTest$tnum.a: Creating test database of type $method."
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_create [is_valid_db $db] TRUE
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tnum.b: get_type after method specifier."
+
+ set db [eval {berkdb_open} $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set type [$db get_type]
+ error_check_good get_type $type [string range $omethod 1 end]
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tnum.c: get_type after DB_UNKNOWN."
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set type [$db get_type]
+ error_check_good get_type $type [string range $omethod 1 end]
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test065.tcl b/db-4.8.30/test/test065.tcl
new file mode 100644
index 0000000..e1cea29
--- /dev/null
+++ b/db-4.8.30/test/test065.tcl
@@ -0,0 +1,207 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test065
+# TEST Test of DB->stat, both -DB_FAST_STAT and row
+# TEST counts with DB->stat -txn.
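+#
+# Minimal sketch of the two stat calls compared below (hedged):
+#	$db stat -faststat   ;# quick counts, e.g. {{Number of keys} N}
+#	$db stat -txn $t     ;# full statistics; "Number of records" is read
+#	                     ;# with the getstats helper used by this suite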
+proc test065 { method args } {
+ source ./include.tcl
+ global errorCode
+ global alphabet
+
+ set nentries 10000
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum "065"
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "Test$tnum: $method ($args) DB->stat(DB_FAST_STAT) test."
+
+ puts "\tTest$tnum.a: Create database and check it while empty."
+
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set ret [catch {eval $db stat -faststat} res]
+
+ error_check_good db_close [$db close] 0
+
+ if { ([is_record_based $method] && ![is_queue $method]) \
+ || [is_rbtree $method] } {
+ error_check_good recordcount_ok [is_substr $res \
+ "{{Number of keys} 0}"] 1
+ } else {
+ puts "\tTest$tnum: Test complete for method $method."
+ return
+ }
+
+ # If we've got this far, we're on an access method for
+ # which record counts makes sense. Thus, we no longer
+ # catch EINVALs, and no longer care about __db_errs.
+ set db [eval {berkdb_open -create -mode 0644} $omethod $args $testfile]
+
+ puts "\tTest$tnum.b: put $nentries keys."
+
+ if { [is_record_based $method] } {
+ set gflags " -recno "
+ set keypfx ""
+ } else {
+ set gflags ""
+ set keypfx "key"
+ }
+
+ set txn ""
+ set data [pad_data $method $alphabet]
+
+ for { set ndx 1 } { $ndx <= $nentries } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
+ error_check_good db_put $ret 0
+ set statret [eval {$db stat} $txn]
+ set rowcount [getstats $statret "Number of records"]
+ error_check_good rowcount $rowcount $ndx
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ error_check_good recordcount_after_puts \
+ [is_substr $ret "{{Number of keys} $nentries}"] 1
+
+ puts "\tTest$tnum.c: delete 90% of keys."
+ set end [expr {$nentries / 10 * 9}]
+ for { set ndx 1 } { $ndx <= $end } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_rrecno $method] == 1 } {
+ # if we're renumbering, when we hit key 5001 we'll
+ # have deleted 5000 and we'll croak! So delete key
+ # 1, repeatedly.
+ set ret [eval {$db del} $txn {[concat $keypfx 1]}]
+ set statret [eval {$db stat} $txn]
+ set rowcount [getstats $statret "Number of records"]
+ error_check_good rowcount $rowcount [expr $nentries - $ndx]
+ } else {
+ set ret [eval {$db del} $txn {$keypfx$ndx}]
+ set rowcount [getstats $statret "Number of records"]
+ error_check_good rowcount $rowcount $nentries
+ }
+ error_check_good db_del $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ if { [is_rrecno $method] == 1 || [is_rbtree $method] == 1 } {
+ # We allow renumbering--thus the stat should return 10%
+ # of nentries.
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10}]}"] 1
+ } else {
+ # No renumbering--no change in RECORDCOUNT!
+ error_check_good recordcount_after_dels \
+ [is_substr $ret "{{Number of keys} $nentries}"] 1
+ }
+
+ puts "\tTest$tnum.d: put new keys at the beginning."
+ set end [expr {$nentries / 10 * 8}]
+ for { set ndx 1 } { $ndx <= $end } {incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
+ error_check_good db_put_beginning $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ if { [is_rrecno $method] == 1 } {
+ # With renumbering we're back up to 80% of $nentries
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10 * 8}]}"] 1
+ } elseif { [is_rbtree $method] == 1 } {
+ # Total records in a btree is now 90% of $nentries
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10 * 9}]}"] 1
+ } else {
+ # No renumbering--still no change in RECORDCOUNT.
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} $nentries}"] 1
+ }
+
+ puts "\tTest$tnum.e: put new keys at the end."
+ set start [expr {1 + $nentries / 10 * 9}]
+ set end [expr {($nentries / 10 * 9) + ($nentries / 10 * 8)}]
+ for { set ndx $start } { $ndx <= $end } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
+ error_check_good db_put_end $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ if { [is_rbtree $method] != 1 } {
+ # If this is a recno database, the record count should be up
+ # to (1.7 x nentries), the largest number we've seen, with
+ # or without renumbering.
+ error_check_good recordcount_after_puts2 [is_substr $ret \
+ "{{Number of keys} [expr {$start - 1 + $nentries / 10 * 8}]}"] 1
+ } else {
+ # In an rbtree, 1000 of those keys were overwrites, so there
+ # are (.7 x nentries) new keys and (.9 x nentries) old keys
+ # for a total of (1.6 x nentries).
+ error_check_good recordcount_after_puts2 [is_substr $ret \
+ "{{Number of keys} [expr {$start -1 + $nentries / 10 * 7}]}"] 1
+ }
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test066.tcl b/db-4.8.30/test/test066.tcl
new file mode 100644
index 0000000..9163ef6
--- /dev/null
+++ b/db-4.8.30/test/test066.tcl
@@ -0,0 +1,103 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test066
+# TEST Test of cursor overwrites of DB_CURRENT w/ duplicates.
+# TEST
+# TEST Make sure a cursor put to DB_CURRENT acts as an overwrite in a
+# TEST database with duplicates.
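+#
+# A rough sketch of the behavior being checked, using hypothetical
+# handles (not executed by the test; the real code below adds error
+# checks and optional transactions):
+#	set dbc [$db cursor]
+#	$dbc get -first              ;# cursor now sits on {key olddata}
+#	$dbc put -current newdata    ;# overwrite the current item in place
+#	$dbc get -first              ;# expect {key newdata}
+#	$dbc get -next               ;# expect "", i.e. no extra duplicate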
+proc test066 { method args } {
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set tnum "066"
+
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Test$tnum: Skipping for method $method."
+ return
+ }
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test$tnum skipping for btree with compression."
+ return
+ }
+ puts "Test$tnum: Test of cursor put to DB_CURRENT with duplicates."
+
+ source ./include.tcl
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test066.db
+ set env NULL
+ } else {
+ set testfile test066.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set txn ""
+ set key "test"
+ set data "olddata"
+
+ set db [eval {berkdb_open -create -mode 0644 -dup} $omethod $args \
+ $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set ret [$dbc get -first]
+ error_check_good db_get $ret [list [list $key [pad_data $method $data]]]
+
+ set newdata "newdata"
+ set ret [$dbc put -current [chop_data $method $newdata]]
+ error_check_good dbc_put $ret 0
+
+ # There should be only one (key,data) pair in the database, and this
+ # is it.
+ set ret [$dbc get -first]
+ error_check_good db_get_first $ret \
+ [list [list $key [pad_data $method $newdata]]]
+
+ # and this one should come up empty.
+ set ret [$dbc get -next]
+ error_check_good db_get_next $ret ""
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tnum: Test completed successfully."
+}
diff --git a/db-4.8.30/test/test067.tcl b/db-4.8.30/test/test067.tcl
new file mode 100644
index 0000000..191c139
--- /dev/null
+++ b/db-4.8.30/test/test067.tcl
@@ -0,0 +1,163 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test067
+# TEST Test of DB_CURRENT partial puts onto almost empty duplicate
+# TEST pages, with and without DB_DUP_SORT.
+# TEST
+# TEST Test of DB_CURRENT partial puts on almost-empty duplicate pages.
+# TEST This test was written to address the following issue, #2 in the
+# TEST list of issues relating to bug #0820:
+# TEST
+# TEST 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
+# TEST In Btree, the DB_CURRENT overwrite of off-page duplicate records
+# TEST first deletes the record and then puts the new one -- this could
+# TEST be a problem if the removal of the record causes a reverse split.
+# TEST Suggested solution is to acquire a cursor to lock down the current
+# TEST record, put a new record after that record, and then delete using
+# TEST the held cursor.
+# TEST
+# TEST It also tests the following, #5 in the same list of issues:
+# TEST 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL
+# TEST set, duplicate comparison routine specified.
+# TEST The partial change does not change how data items sort, but the
+# TEST record to be put isn't built yet, and the record that is supplied
+# TEST is the one that's checked for ordering compatibility.
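+#
+# The core sequence exercised below, sketched with hypothetical handles
+# (the real test wraps these calls in error checks and transactions):
+#	set ret [$dbc get -last]                   ;# sit on the last duplicate
+#	$dbc put -current -partial {1 4} "bcde"    ;# rewrite bytes 1-4 in place
+#	$dbc del                                   ;# then delete it, shrinking the dup set
+# The partial put writes the same bytes that are already there, so sort
+# order never changes; the interesting part is that the overwrite lands
+# on an almost-empty duplicate page.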
+proc test067 { method {ndups 1000} {tnum "067"} args } {
+ source ./include.tcl
+ global alphabet
+ global errorCode
+ global is_je_test
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txn ""
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $ndups == 1000 } {
+ set ndups 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ cleanup $testdir $env
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "\tTest$tnum: skipping for method $method."
+ return
+ }
+
+ puts "Test$tnum:\
+ $method ($args) Partial puts on near-empty duplicate pages."
+
+ foreach dupopt { "-dup" "-dup -dupsort" } {
+ if { $is_je_test || [is_compressed $args] } {
+ if { $dupopt == "-dup" } {
+ continue
+ }
+ }
+
+ #
+ # Testdir might get reset from the env's home dir back
+ # to the default if this calls something that sources
+ # include.tcl, since testdir is a global. Set it correctly
+ # here each time through the loop.
+ #
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod} $args $dupopt {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest$tnum.a ($dupopt): Put $ndups duplicates."
+
+ set key "key_test$tnum"
+
+ for { set ndx 0 } { $ndx < $ndups } { incr ndx } {
+ set data $alphabet$ndx
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # No need for pad_data since we're skipping recno.
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good put($key,$data) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Sync so we can inspect database if the next section bombs.
+ error_check_good db_sync [$db sync] 0
+ puts "\tTest$tnum.b ($dupopt):\
+ Deleting dups (last first), overwriting each."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ set count 0
+ while { $count < $ndups - 1 } {
+ # set cursor to last item in db
+ set ret [$dbc get -last]
+ error_check_good \
+ verify_key [lindex [lindex $ret 0] 0] $key
+
+ # for error reporting
+ set currdatum [lindex [lindex $ret 0] 1]
+
+ # partial-overwrite it
+ # (overwrite offsets 1-4 with "bcde"--which they
+ # already are)
+
+ # Even though we expect success, we catch this
+ # since it might return EINVAL, and we want that
+ # to FAIL.
+ set errorCode NONE
+ set ret [catch {eval $dbc put -current \
+ {-partial [list 1 4]} "bcde"} \
+ res]
+ error_check_good \
+ partial_put_valid($currdatum) $errorCode NONE
+ error_check_good partial_put($currdatum) $res 0
+
+ # delete it
+ error_check_good dbc_del [$dbc del] 0
+
+ #puts $currdatum
+
+ incr count
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/db-4.8.30/test/test068.tcl b/db-4.8.30/test/test068.tcl
new file mode 100644
index 0000000..9206eb3
--- /dev/null
+++ b/db-4.8.30/test/test068.tcl
@@ -0,0 +1,233 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test068
+# TEST Test of DB_BEFORE and DB_AFTER with partial puts.
+# TEST Make sure DB_BEFORE and DB_AFTER work properly with partial puts, and
+# TEST check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not.
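+#
+# Sketch of the partial puts exercised below (hypothetical handles; the
+# real test adds error checks and transactions):
+#	$dbc get -set $word                      ;# cursor on {$word $word}
+#	$dbc put -after -partial {4 0} after     ;# new dup after the cursor
+#	$dbc put -before -partial {6 0} before   ;# new dup before the cursor
+# Because each new record starts out empty, the partial put at offset 4
+# yields "\0\0\0\0after" and the one at offset 6 yields
+# "\0\0\0\0\0\0before", which is what the verification loop expects.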
+proc test068 { method args } {
+ source ./include.tcl
+ global alphabet
+ global errorCode
+ global is_je_test
+
+ set tnum "068"
+ set orig_tdir $testdir
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test$tnum:\
+ $method ($args) Test of DB_BEFORE/DB_AFTER and partial puts."
+ if { [is_record_based $method] == 1 } {
+ puts "\tTest$tnum: skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set nkeys 1000
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ set nkeys 100
+ }
+ set testdir [get_home $env]
+ }
+
+ # Create a list of $nkeys words to insert into db.
+ puts "\tTest$tnum.a: Initialize word list."
+ set txn ""
+ set wordlist {}
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nkeys } {
+ lappend wordlist $str
+ incr count
+ }
+ close $did
+
+ # Sanity check: did we get $nkeys words?
+ error_check_good enough_keys [llength $wordlist] $nkeys
+
+ # rbtree can't handle dups, so just test the non-dup case
+ # if it's the current method.
+ if { [is_rbtree $method] == 1 } {
+ set dupoptlist { "" }
+ } else {
+ set dupoptlist { "" "-dup" "-dup -dupsort" }
+ }
+
+ foreach dupopt $dupoptlist {
+ if { $is_je_test || [is_compressed $args] == 1 } {
+ if { $dupopt == "-dup" } {
+ continue
+ }
+ }
+
+ # Testdir might be reset in the loop by some proc sourcing
+ # include.tcl. Reset it to the env's home here, before
+ # cleanup.
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open_noerr -create -mode 0644 \
+ $omethod} $args $dupopt {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest$tnum.b ($dupopt): DB initialization: put loop."
+ foreach word $wordlist {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$word $word}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest$tnum.c ($dupopt): get loop."
+ foreach word $wordlist {
+ # Make sure that each word was correctly inserted
+ # and comes back unchanged from a simple get by key.
+
+ set dbt [$db get $word]
+ error_check_good get_key [list [list $word $word]] $dbt
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest$tnum.d ($dupopt): DBC->put w/ DB_AFTER."
+
+ # Set cursor to the first key; make sure it succeeds.
+ # With an unsorted wordlist, we can't be sure that the
+ # first item returned will equal the first item in the
+ # wordlist, so we just make sure it got something back.
+ set dbt [eval {$dbc get -first}]
+ error_check_good \
+ dbc_get_first [llength $dbt] 1
+
+ # If -dup is not set, or if -dupsort is set too, we
+ # need to verify that DB_BEFORE and DB_AFTER fail
+ # and then move on to the next $dupopt.
+ if { $dupopt != "-dup" } {
+ set errorCode "NONE"
+ set ret [catch {eval $dbc put -after \
+ {-partial [list 6 0]} "after"} res]
+ error_check_good dbc_put_after_fail $ret 1
+ error_check_good dbc_put_after_einval \
+ [is_substr $errorCode EINVAL] 1
+ puts "\tTest$tnum ($dupopt): DB_AFTER returns EINVAL."
+ set errorCode "NONE"
+ set ret [catch {eval $dbc put -before \
+ {-partial [list 6 0]} "before"} res]
+ error_check_good dbc_put_before_fail $ret 1
+ error_check_good dbc_put_before_einval \
+ [is_substr $errorCode EINVAL] 1
+ puts "\tTest$tnum ($dupopt): DB_BEFORE returns EINVAL."
+ puts "\tTest$tnum ($dupopt): Correct error returns,\
+ skipping further test."
+ # continue with broad foreach
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ continue
+ }
+
+ puts "\tTest$tnum.e ($dupopt): DBC->put(DB_AFTER) loop."
+ foreach word $wordlist {
+ # set cursor to $word
+ set dbt [$dbc get -set $word]
+ error_check_good \
+ dbc_get_set $dbt [list [list $word $word]]
+ # put after it
+ set ret [$dbc put -after -partial {4 0} after]
+ error_check_good dbc_put_after $ret 0
+ }
+
+ puts "\tTest$tnum.f ($dupopt): DBC->put(DB_BEFORE) loop."
+ foreach word $wordlist {
+ # set cursor to $word
+ set dbt [$dbc get -set $word]
+ error_check_good \
+ dbc_get_set $dbt [list [list $word $word]]
+ # put before it
+ set ret [$dbc put -before -partial {6 0} before]
+ error_check_good dbc_put_before $ret 0
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ eval $db sync
+ puts "\tTest$tnum.g ($dupopt): Verify correctness."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # loop through the whole db beginning to end,
+ # make sure we have, in order, {$word "\0\0\0\0\0\0before"},
+ # {$word $word}, {$word "\0\0\0\0after"} for each word.
+ set count 0
+ while { $count < $nkeys } {
+ # Get the first item of each set of three.
+ # We don't know what the word is, but set $word to
+ # the key and check that the data is
+ # "\0\0\0\0\0\0before".
+ set dbt [$dbc get -next]
+ set word [lindex [lindex $dbt 0] 0]
+
+ error_check_good dbc_get_one $dbt \
+ [list [list $word "\0\0\0\0\0\0before"]]
+
+ set dbt [$dbc get -next]
+ error_check_good \
+ dbc_get_two $dbt [list [list $word $word]]
+
+ set dbt [$dbc get -next]
+ error_check_good dbc_get_three $dbt \
+ [list [list $word "\0\0\0\0after"]]
+
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+ set testdir $orig_tdir
+}
diff --git a/db-4.8.30/test/test069.tcl b/db-4.8.30/test/test069.tcl
new file mode 100644
index 0000000..5e497cf
--- /dev/null
+++ b/db-4.8.30/test/test069.tcl
@@ -0,0 +1,13 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test069
+# TEST Test of DB_CURRENT partial puts without duplicates-- test067 w/
+# TEST small ndups to ensure that partial puts to DB_CURRENT work
+# TEST correctly in the absence of duplicate pages.
+proc test069 { method {ndups 50} {tnum "069"} args } {
+ eval test067 $method $ndups $tnum $args
+}
diff --git a/db-4.8.30/test/test070.tcl b/db-4.8.30/test/test070.tcl
new file mode 100644
index 0000000..2887456
--- /dev/null
+++ b/db-4.8.30/test/test070.tcl
@@ -0,0 +1,137 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test070
+# TEST Test of DB_CONSUME (Four consumers, 1000 items.)
+# TEST
+# TEST Fork off six processes, four consumers and two producers.
+# TEST The producers each put nitems/nproducers records into a queue;
+# TEST the consumers each get nitems/nconsumers records back out.
+# TEST Then, verify that no record was lost or retrieved twice.
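+#
+# Each child process runs conscript.tcl against the same queue database.
+# Producers loop over DB->put; consumers drain the queue with DB_CONSUME,
+# roughly like this (hypothetical handle; the actual calls live in
+# conscript.tcl):
+#	set ret [$db get -consume]   ;# atomically return and delete the head record
+# The parent then merges the consumers' logs and checks that every record
+# number appears exactly once.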
+proc test070 { method {nconsumers 4} {nproducers 2} \
+ {nitems 1000} {mode CONSUME } {start 0} {txn -txn} {tnum "070"} args } {
+ source ./include.tcl
+ global alphabet
+ global encrypt
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test$tnum skipping for env $env"
+ return
+ }
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test$tnum skipping for security"
+ return
+ }
+ set pageargs ""
+ split_pageargs $args pageargs
+
+ puts "Test$tnum: $method ($args) Test of DB_$mode flag to DB->get."
+ puts "\tUsing $txn environment."
+
+ error_check_good enough_consumers [expr $nconsumers > 0] 1
+ error_check_good enough_producers [expr $nproducers > 0] 1
+
+ if { [is_queue $method] != 1 } {
+ puts "\tSkipping Test$tnum for method $method."
+ return
+ }
+
+ env_cleanup $testdir
+ set testfile test$tnum.db
+
+ # Create environment
+ set dbenv [eval {berkdb_env -create $txn -home } $testdir $pageargs]
+ error_check_good dbenv_create [is_valid_env $dbenv] TRUE
+
+ # Create database
+ set db [eval {berkdb_open -create -mode 0644 -queue}\
+ -env $dbenv $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if { $start != 0 } {
+ error_check_good set_seed [$db put $start "consumer data"] 0
+ puts "\tTest$tnum: starting at $start."
+ } else {
+ incr start
+ }
+
+ set pidlist {}
+
+ # Divvy up the total number of records amongst the consumers and
+ # producers.
+ error_check_good cons_div_evenly [expr $nitems % $nconsumers] 0
+ error_check_good prod_div_evenly [expr $nitems % $nproducers] 0
+ set nperconsumer [expr $nitems / $nconsumers]
+ set nperproducer [expr $nitems / $nproducers]
+
+ set consumerlog $testdir/CONSUMERLOG.
+
+ # Fork consumer processes (we want them to be hungry)
+ for { set ndx 0 } { $ndx < $nconsumers } { incr ndx } {
+ set output $consumerlog$ndx
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ conscript.tcl $testdir/conscript.log.consumer$ndx \
+ $testdir $testfile $mode $nperconsumer $output $tnum \
+ $args &]
+ lappend pidlist $p
+ }
+ for { set ndx 0 } { $ndx < $nproducers } { incr ndx } {
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ conscript.tcl $testdir/conscript.log.producer$ndx \
+ $testdir $testfile PRODUCE $nperproducer "" $tnum \
+ $args &]
+ lappend pidlist $p
+ }
+
+ # Wait for all children.
+ watch_procs $pidlist 10
+
+ # Verify: slurp all record numbers into list, sort, and make
+ # sure each appears exactly once.
+ puts "\tTest$tnum: Verifying results."
+ set reclist {}
+ for { set ndx 0 } { $ndx < $nconsumers } { incr ndx } {
+ set input $consumerlog$ndx
+ set iid [open $input r]
+ while { [gets $iid str] != -1 } {
+ lappend reclist $str
+ }
+ close $iid
+ }
+ set sortreclist [lsort -command int32_compare $reclist]
+
+ set nitems [expr $start + $nitems]
+ for { set ndx $start } { $ndx < $nitems } { set ndx [expr $ndx + 1] } {
+ # Wrap if $ndx goes beyond 32 bits because our
+ # recno wrapped if it did.
+ if { $ndx > 0xffffffff } {
+ set cmp [expr $ndx - 0xffffffff]
+ } else {
+ set cmp [expr $ndx + 0]
+ }
+ # Skip 0 if we are wrapping around
+ if { $cmp == 0 } {
+ incr ndx
+ incr nitems
+ incr cmp
+ }
+ # Be sure to convert ndx to a number before comparing.
+ error_check_good pop_num [lindex $sortreclist 0] $cmp
+ set sortreclist [lreplace $sortreclist 0 0]
+ }
+ error_check_good list_ends_empty $sortreclist {}
+ error_check_good db_close [$db close] 0
+ error_check_good dbenv_close [$dbenv close] 0
+
+ puts "\tTest$tnum completed successfully."
+}
diff --git a/db-4.8.30/test/test071.tcl b/db-4.8.30/test/test071.tcl
new file mode 100644
index 0000000..9c05502
--- /dev/null
+++ b/db-4.8.30/test/test071.tcl
@@ -0,0 +1,15 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test071
+# TEST Test of DB_CONSUME (One consumer, 10000 items.)
+# TEST This is DB Test 70, with one consumer, one producer, and 10000 items.
+proc test071 { method {nconsumers 1} {nproducers 1} {nitems 10000} \
+ {mode CONSUME} {start 0 } {txn -txn} {tnum "071"} args } {
+
+ eval test070 $method \
+ $nconsumers $nproducers $nitems $mode $start $txn $tnum $args
+}
diff --git a/db-4.8.30/test/test072.tcl b/db-4.8.30/test/test072.tcl
new file mode 100644
index 0000000..816858f
--- /dev/null
+++ b/db-4.8.30/test/test072.tcl
@@ -0,0 +1,258 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test072
+# TEST Test of cursor stability when duplicates are moved off-page.
+proc test072 { method {pagesize 512} {ndups 20} {tnum "072"} args } {
+ source ./include.tcl
+ global alphabet
+ global is_je_test
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile name should just be
+ # the db name. Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set basename $testdir/test$tnum
+ set env NULL
+ } else {
+ set basename test$tnum
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Keys must sort $prekey < $key < $postkey.
+ set prekey "a key"
+ set key "the key"
+ set postkey "z key"
+
+ # Make these distinguishable from each other and from the
+ # alphabets used for the $key's data.
+ set predatum "1234567890"
+ set postdatum "0987654321"
+
+ puts -nonewline "Test$tnum $omethod ($args): "
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "\nTest$tnum: Test of cursor stability when\
+ duplicates are moved off-page."
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ append args " -pagesize $pagesize "
+ set txn ""
+
+ set dlist [list "-dup" "-dup -dupsort"]
+ set testid 0
+ foreach dupopt $dlist {
+ if { $is_je_test || [is_compressed $args] } {
+ if { $dupopt == "-dup" } {
+ continue
+ }
+ }
+
+ incr testid
+ set duptestfile $basename$testid.db
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $args $dupopt {$duptestfile}]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ puts \
+"\tTest$tnum.a: ($dupopt) Set up surrounding keys and cursors."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$prekey $predatum}]
+ error_check_good pre_put $ret 0
+ set ret [eval {$db put} $txn {$postkey $postdatum}]
+ error_check_good post_put $ret 0
+
+ set precursor [eval {$db cursor} $txn]
+ error_check_good precursor [is_valid_cursor $precursor \
+ $db] TRUE
+ set postcursor [eval {$db cursor} $txn]
+ error_check_good postcursor [is_valid_cursor $postcursor \
+ $db] TRUE
+ error_check_good preset [$precursor get -set $prekey] \
+ [list [list $prekey $predatum]]
+ error_check_good postset [$postcursor get -set $postkey] \
+ [list [list $postkey $postdatum]]
+
+ puts "\tTest$tnum.b: Put/create cursor/verify all cursor loop."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [format "%4d$alphabet" [expr $i + 1000]]
+ set data($i) $datum
+
+ # Uncomment these lines to see intermediate steps.
+ # error_check_good db_sync($i) [$db sync] 0
+ # error_check_good db_dump($i) \
+ # [catch {exec $util_path/db_dump \
+ # -da $duptestfile > $testdir/out.$i}] 0
+
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+
+ set dbc($i) [eval {$db cursor} $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+
+ for { set j 0 } { $j < $i } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ #puts "cursor $j after $i: $d"
+
+ eval {$db sync}
+
+ error_check_good\
+ "cursor $j key correctness after $i puts" \
+ $k $key
+ error_check_good\
+ "cursor $j data correctness after $i puts" \
+ $d $data($j)
+ }
+
+ # Check correctness of pre- and post- cursors. Do an
+ # error_check_good on the lengths first so that we don't
+ # spew garbage as the "got" field and screw up our
+ # terminal. (It's happened here.)
+ set pre_dbt [$precursor get -current]
+ set post_dbt [$postcursor get -current]
+ error_check_good \
+ "key earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 0]] \
+ [string length $prekey]
+ error_check_good \
+ "data earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 1]] \
+ [string length $predatum]
+ error_check_good \
+ "key later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 0]] \
+ [string length $postkey]
+ error_check_good \
+ "data later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 1]]\
+ [string length $postdatum]
+
+ error_check_good \
+ "earlier cursor correctness after $i puts" \
+ $pre_dbt [list [list $prekey $predatum]]
+ error_check_good \
+ "later cursor correctness after $i puts" \
+ $post_dbt [list [list $postkey $postdatum]]
+ }
+
+ puts "\tTest$tnum.c: Reverse Put/create cursor/verify all cursor loop."
+ set end [expr $ndups * 2 - 1]
+ for { set i $end } { $i >= $ndups } { set i [expr $i - 1] } {
+ set datum [format "%4d$alphabet" [expr $i + 1000]]
+ set data($i) $datum
+
+ # Uncomment these lines to see intermediate steps.
+ # error_check_good db_sync($i) [$db sync] 0
+ # error_check_good db_dump($i) \
+ # [catch {exec $util_path/db_dump \
+ # -da $duptestfile > $testdir/out.$i}] 0
+
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+
+ error_check_bad dbc($i)_stomped [info exists dbc($i)] 1
+ set dbc($i) [eval {$db cursor} $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+
+ for { set j $i } { $j < $end } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ #puts "cursor $j after $i: $d"
+
+ eval {$db sync}
+
+ error_check_good\
+ "cursor $j key correctness after $i puts" \
+ $k $key
+ error_check_good\
+ "cursor $j data correctness after $i puts" \
+ $d $data($j)
+ }
+
+ # Check correctness of pre- and post- cursors. Do an
+ # error_check_good on the lengths first so that we don't
+ # spew garbage as the "got" field and screw up our
+ # terminal. (It's happened here.)
+ set pre_dbt [$precursor get -current]
+ set post_dbt [$postcursor get -current]
+ error_check_good \
+ "key earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 0]] \
+ [string length $prekey]
+ error_check_good \
+ "data earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 1]] \
+ [string length $predatum]
+ error_check_good \
+ "key later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 0]] \
+ [string length $postkey]
+ error_check_good \
+ "data later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 1]]\
+ [string length $postdatum]
+
+ error_check_good \
+ "earlier cursor correctness after $i puts" \
+ $pre_dbt [list [list $prekey $predatum]]
+ error_check_good \
+ "later cursor correctness after $i puts" \
+ $post_dbt [list [list $postkey $postdatum]]
+ }
+
+ # Close cursors.
+ puts "\tTest$tnum.d: Closing cursors."
+ for { set i 0 } { $i <= $end } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ unset dbc
+ error_check_good precursor_close [$precursor close] 0
+ error_check_good postcursor_close [$postcursor close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+ }
+}
diff --git a/db-4.8.30/test/test073.tcl b/db-4.8.30/test/test073.tcl
new file mode 100644
index 0000000..95c82c4
--- /dev/null
+++ b/db-4.8.30/test/test073.tcl
@@ -0,0 +1,296 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test073
+# TEST Test of cursor stability on duplicate pages.
+# TEST
+# TEST Does the following:
+# TEST a. Initialize things by DB->putting ndups dups and
+# TEST setting a reference cursor to point to each.
+# TEST b. c_put ndups dups (and correspondingly expanding
+# TEST the set of reference cursors) after the last one, making sure
+# TEST after each step that all the reference cursors still point to
+# TEST the right item.
+# TEST c. Ditto, but before the first one.
+# TEST d. Ditto, but after each one in sequence first to last.
+# TEST e. Ditto, but after each one in sequence from last to first
+# TEST    (so that later cursor operations occur relative to the new datum).
+# TEST f. Ditto for the two sequence tests, only doing a
+# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+# TEST new one.
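+#
+# Every step repeats the same pattern, shown here with hypothetical
+# handles (the real code adds error checks and transactions):
+#	set curs [$db cursor]
+#	$curs put -keylast $key $datum    ;# or -keyfirst/-after/-before/-current
+# ...after which verify_t73 walks every reference cursor opened so far
+# and checks that "get -current" still returns its original {key datum}.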
+proc test073 { method {pagesize 512} {ndups 50} {tnum "073"} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set key "the key"
+ set txn ""
+
+ puts -nonewline "Test$tnum $omethod ($args): "
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ }
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test$tnum skipping for btree with compression."
+ return
+ }
+
+ puts "cursor stability on duplicate pages."
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test073: skipping for specific pagesizes"
+ return
+ }
+
+ append args " -pagesize $pagesize -dup"
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $omethod $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ # Number of outstanding keys.
+ set keys 0
+
+ puts "\tTest$tnum.a.1: Initializing put loop; $ndups dups, short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set is_long($i) 0
+ incr keys
+ }
+
+ puts "\tTest$tnum.a.2: Initializing cursor get loop; $keys dups."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 0 } { $i < $keys } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ set dbc($i) [eval {$db cursor} $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+ }
+
+ puts "\tTest$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\
+ short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYLAST, $keys)"\
+ [$curs put -keylast $key $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest$tnum.c: Cursor put (DB_KEYFIRST); $ndups new dups,\
+ short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYFIRST, $keys)"\
+ [$curs put -keyfirst $key $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest$tnum.d: Cursor put (DB_AFTER) first to last;\
+ $keys new dups, short data"
+ # We want to add a datum after each key from 0 to the current
+ # value of $keys, which we thus need to save.
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy after.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_AFTER, $i)"\
+ [$curs put -after $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest$tnum.e: Cursor put (DB_BEFORE) last to first;\
+ $keys new dups, short data"
+
+ for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy before.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_BEFORE, $i)"\
+ [$curs put -before $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ if { $i % 10 == 1 } {
+ verify_t73 is_long dbc $keys $key
+ }
+ }
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest$tnum.f: Cursor put (DB_CURRENT), first to last,\
+ growing $keys data."
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set olddatum [makedatum_t73 $i 0]
+ set newdatum [makedatum_t73 $i 1]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $olddatum]\
+ [list [list $key $olddatum]]
+ error_check_good "c_put(DB_CURRENT, $i)"\
+ [$curs put -current $newdatum] 0
+
+ error_check_good "cursor close" [$curs close] 0
+
+ set is_long($i) 1
+
+ if { $i % 10 == 1 } {
+ verify_t73 is_long dbc $keys $key
+ }
+ }
+ verify_t73 is_long dbc $keys $key
+
+ # Close cursors.
+ puts "\tTest$tnum.g: Closing cursors."
+ for { set i 0 } { $i < $keys } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+}
+
+# !!!: This procedure is also used by test087.
+proc makedatum_t73 { num is_long } {
+ global alphabet
+ if { $is_long == 1 } {
+ set a $alphabet$alphabet$alphabet
+ } else {
+ set a abcdefghijklm
+ }
+
+ # Zero-pad the number to four digits (format handles the padding).
+ set i [format %04d $num]
+
+ return $i$a
+}
+
+# !!!: This procedure is also used by test087.
+proc verify_t73 { is_long_array curs_array numkeys key } {
+ upvar $is_long_array is_long
+ upvar $curs_array dbc
+ upvar db db
+
+ #useful for debugging, perhaps.
+ eval $db sync
+
+ for { set j 0 } { $j < $numkeys } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ error_check_good\
+ "cursor $j key correctness (with $numkeys total items)"\
+ $k $key
+ error_check_good\
+ "cursor $j data correctness (with $numkeys total items)"\
+ $d [makedatum_t73 $j $is_long($j)]
+ }
+}
diff --git a/db-4.8.30/test/test074.tcl b/db-4.8.30/test/test074.tcl
new file mode 100644
index 0000000..5a9ea5d
--- /dev/null
+++ b/db-4.8.30/test/test074.tcl
@@ -0,0 +1,276 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test074
+# TEST Test of DB_NEXT_NODUP.
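+#
+# The traversal under test, with a hypothetical cursor handle:
+#	$dbc get -nextnodup    ;# next item whose key differs from the current key
+# With -prevnodup (used by test082) the walk runs backward and lands on
+# the last duplicate of each preceding key, which is why the dup-set get
+# loop below adjusts the expected datum for that direction.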
+proc test074 { method {dir -nextnodup} {nitems 100} {tnum "074"} args } {
+ source ./include.tcl
+ global alphabet
+ global is_je_test
+ global rand_init
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ berkdb srand $rand_init
+
+ # Data prefix--big enough that we get a mix of on-page, off-page,
+ # and multi-off-page dups with the default nitems
+ if { [is_fixed_length $method] == 1 } {
+ set globaldata "somedata"
+ } else {
+ set globaldata [repeat $alphabet 4]
+ }
+
+ puts "Test$tnum $omethod ($args): Test of $dir"
+
+ # First, test non-dup (and not-very-interesting) case with
+ # all db types.
+
+ puts "\tTest$tnum.a: No duplicates."
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum-nodup.db
+ set env NULL
+ } else {
+ set testfile test$tnum-nodup.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644} $omethod\
+ $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn ""
+
+ # Insert nitems items.
+ puts "\t\tTest$tnum.a.1: Put loop."
+ for {set i 1} {$i <= $nitems} {incr i} {
+ #
+ # If record based, set key to $i * 2 to leave
+ # holes/unused entries for further testing.
+ #
+ if {[is_record_based $method] == 1} {
+ set key [expr $i * 2]
+ } else {
+ set key "key$i"
+ }
+ set data "$globaldata$i"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key \
+ [chop_data $method $data]}]
+ error_check_good put($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\t\tTest$tnum.a.2: Get($dir)"
+
+ # foundarray($i) is set when key number i is found in the database
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # Initialize foundarray($i) to zero for all $i
+ for {set i 1} {$i <= $nitems} {incr i} {
+ set foundarray($i) 0
+ }
+
+ # Walk database using $dir and record each key gotten.
+ for {set i 1} {$i <= $nitems} {incr i} {
+ set dbt [$dbc get $dir]
+ set key [lindex [lindex $dbt 0] 0]
+ if {[is_record_based $method] == 1} {
+ set num [expr $key / 2]
+ set desired_key $key
+ error_check_good $method:num $key [expr $num * 2]
+ } else {
+ set num [string range $key 3 end]
+ set desired_key key$num
+ }
+
+ error_check_good dbt_correct($i) $dbt\
+ [list [list $desired_key\
+ [pad_data $method $globaldata$num]]]
+
+ set foundarray($num) 1
+ }
+
+ puts "\t\tTest$tnum.a.3: Final key."
+ error_check_good last_db_get [$dbc get $dir] [list]
+
+ puts "\t\tTest$tnum.a.4: Verify loop."
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ error_check_good found_key($i) $foundarray($i) 1
+ }
+
+ error_check_good dbc_close(nodup) [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # If we are a method that doesn't allow dups, verify that
+ # we get an empty list if we try to use DB_NEXT_DUP
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ puts "\t\tTest$tnum.a.5: Check DB_NEXT_DUP for $method."
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set dbt [$dbc get $dir]
+ error_check_good $method:nextdup [$dbc get -nextdup] [list]
+ error_check_good dbc_close(nextdup) [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good db_close(nodup) [$db close] 0
+
+ # Quit here if we're a method that won't allow dups.
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "\tTest$tnum: Skipping remainder for method $method."
+ return
+ }
+
+ foreach opt { "-dup" "-dupsort" } {
+ if { $is_je_test || [is_compressed $args] } {
+ if { $opt == "-dup" } {
+ continue
+ }
+ }
+
+ #
+ # If we are using an env, then testfile should just be the
+ # db name. Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum$opt.db
+ } else {
+ set testfile test$tnum$opt.db
+ }
+
+ if { [string compare $opt "-dupsort"] == 0 } {
+ set opt "-dup -dupsort"
+ }
+
+ puts "\tTest$tnum.b: Duplicates ($opt)."
+
+ puts "\t\tTest$tnum.b.1 ($opt): Put loop."
+ set db [eval {berkdb_open -create -mode 0644}\
+ $opt $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Insert nitems different keys such that key i has i dups.
+ for {set i 1} {$i <= $nitems} {incr i} {
+ set key key$i
+
+ for {set j 1} {$j <= $i} {incr j} {
+ if { $j < 10 } {
+ set data "${globaldata}00$j"
+ } elseif { $j < 100 } {
+ set data "${globaldata}0$j"
+ } else {
+ set data "$globaldata$j"
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good put($i,$j) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+ # Initialize foundarray($i) to 0 for all i.
+ unset foundarray
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ set foundarray($i) 0
+ }
+
+ # Get loop--after each get, move forward a random increment
+ # within the duplicate set.
+ puts "\t\tTest$tnum.b.2 ($opt): Get loop."
+ set one "001"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good dbc($opt) [is_valid_cursor $dbc $db] TRUE
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ set dbt [$dbc get $dir]
+ set key [lindex [lindex $dbt 0] 0]
+ set num [string range $key 3 end]
+
+ set desired_key key$num
+ if { [string compare $dir "-prevnodup"] == 0 } {
+ if { $num < 10 } {
+ set one "00$num"
+ } elseif { $num < 100 } {
+ set one "0$num"
+ } else {
+ set one $num
+ }
+ }
+
+ error_check_good dbt_correct($i) $dbt\
+ [list [list $desired_key\
+ "$globaldata$one"]]
+
+ set foundarray($num) 1
+
+ # Go forward by some number w/i dup set.
+ set inc [berkdb random_int 0 [expr $num - 1]]
+ for { set j 0 } { $j < $inc } { incr j } {
+ eval {$dbc get -nextdup}
+ }
+ }
+
+ puts "\t\tTest$tnum.b.3 ($opt): Final key."
+ error_check_good last_db_get($opt) [$dbc get $dir] [list]
+
+ # Verify
+ puts "\t\tTest$tnum.b.4 ($opt): Verify loop."
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ error_check_good found_key($i) $foundarray($i) 1
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/db-4.8.30/test/test076.tcl b/db-4.8.30/test/test076.tcl
new file mode 100644
index 0000000..cd0e7ce
--- /dev/null
+++ b/db-4.8.30/test/test076.tcl
@@ -0,0 +1,90 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test076
+# TEST Test creation of many small databases in a single environment. [#1528].
+proc test076 { method { ndbs 1000 } { tnum "076" } args } {
+ global is_qnx_test
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+ set pageargs ""
+ split_pageargs $args pageargs
+
+ if { [is_record_based $method] == 1 } {
+ set key ""
+ } else {
+ set key "key"
+ }
+ set data "datamoredatamoredata"
+
+ # Create an env if we weren't passed one.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set deleteenv 1
+ env_cleanup $testdir
+ set env [eval \
+ {berkdb_env -create -home} $testdir $pageargs $encargs]
+ error_check_good env [is_valid_env $env] TRUE
+ set args "$args -env $env"
+ } else {
+ set deleteenv 0
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $ndbs == 1000 } {
+ set ndbs 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ if { $is_qnx_test && $ndbs > 100 } {
+ set ndbs 100
+ }
+ if { [is_queueext $method] } {
+ set ndbs 500
+ }
+
+ puts -nonewline "Test$tnum $method ($args): "
+ puts -nonewline "Create $ndbs"
+ puts " small databases in one env."
+
+ cleanup $testdir $env
+ set txn ""
+
+ for { set i 1 } { $i <= $ndbs } { incr i } {
+ set testfile test$tnum.$i.db
+
+ set db [eval {berkdb_open -create -mode 0644}\
+ $args $omethod $testfile]
+ error_check_good db_open($i) [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i \
+ [chop_data $method $data$i]}]
+ error_check_good db_put($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close($i) [$db close] 0
+ }
+
+ if { $deleteenv == 1 } {
+ error_check_good env_close [$env close] 0
+ }
+
+ puts "\tTest$tnum passed."
+}
diff --git a/db-4.8.30/test/test077.tcl b/db-4.8.30/test/test077.tcl
new file mode 100644
index 0000000..36ba45d
--- /dev/null
+++ b/db-4.8.30/test/test077.tcl
@@ -0,0 +1,92 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test077
+# TEST Test of DB_GET_RECNO [#1206].
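+#
+# The call under test, via the Tcl wrapper (hypothetical cursor handle):
+#	set recno [$dbc get -get_recno]   ;# ordinal position of the current item
+# In a btree opened with record numbers, this should equal the key's
+# position in sort order, which the loop below checks against the key.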
+proc test077 { method { nkeys 1000 } { tnum "077" } args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test$tnum: Test of DB_GET_RECNO."
+
+ if { [is_rbtree $method] != 1 } {
+ puts "\tTest$tnum: Skipping for method $method."
+ return
+ }
+
+ set data $alphabet
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest$tnum.a: Populating database."
+ set txn ""
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ set key [format %5d $i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good db_put($key) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest$tnum.b: Verifying record numbers."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good dbc_open [is_valid_cursor $dbc $db] TRUE
+
+ set i 1
+ for { set dbt [$dbc get -first] } \
+ { [string length $dbt] != 0 } \
+ { set dbt [$dbc get -next] } {
+ set recno [$dbc get -get_recno]
+ set keynum [expr [lindex [lindex $dbt 0] 0]]
+
+ # Verify that i, the number that is the key, and recno
+ # are all equal.
+ error_check_good key($i) $keynum $i
+ error_check_good recno($i) $recno $i
+ incr i
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test078.tcl b/db-4.8.30/test/test078.tcl
new file mode 100644
index 0000000..5c9f2d6
--- /dev/null
+++ b/db-4.8.30/test/test078.tcl
@@ -0,0 +1,252 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test078
+# TEST Test of DBC->c_count(). [#303]
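+#
+# The interface under test, via the Tcl wrapper (hypothetical handle):
+#	$db count $key    ;# number of data items (duplicates) stored under $key
+# The test checks this count after puts, cursor deletes, and DB->del calls.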
+proc test078 { method { nkeys 100 } { pagesize 512 } { tnum "078" } args } {
+ source ./include.tcl
+ global alphabet
+ global is_je_test
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test$tnum ($method): Test of key counts."
+
+ berkdb srand $rand_init
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ }
+
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum-a.db
+ set env NULL
+ } else {
+ set testfile test$tnum-a.db
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ set nkeys 50
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test078: skipping for specific pagesizes"
+ return
+ }
+ puts "\tTest$tnum.a: No duplicates, trivial answer."
+ puts "\t\tTest$tnum.a.1: Populate database, verify dup counts."
+ set db [eval {berkdb_open -create -mode 0644\
+ -pagesize $pagesize} $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn ""
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i\
+ [pad_data $method $alphabet$i]}]
+ error_check_good put.a($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good count.a [$db count $i] 1
+ }
+
+ if { [is_rrecno $method] == 1 } {
+ error_check_good db_close.a [$db close] 0
+ puts "\tTest$tnum.a2: Skipping remainder of test078 for -rrecno."
+ return
+ }
+
+ puts "\t\tTest$tnum.a.2: Delete items, verify dup counts again."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db del} $txn $i]
+ error_check_good del.a($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good count.a [$db count $i] 0
+ }
+
+
+ error_check_good db_close.a [$db close] 0
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts \
+ "\tTest$tnum.b: Duplicates not supported in $method, skipping."
+ return
+ }
+
+ foreach {let descrip dupopt} \
+ {b sorted "-dup -dupsort" c unsorted "-dup"} {
+
+ if { [is_compressed $args] } {
+ if { $dupopt == "-dup" } {
+ continue
+ }
+ }
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum-b.db
+ set env NULL
+ } else {
+ set testfile test$tnum-b.db
+ set env [lindex $args $eindex]
+ if { $is_je_test } {
+ if { $dupopt == "-dup" } {
+ continue
+ }
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest$tnum.$let: Duplicates ($descrip)."
+ puts "\t\tTest$tnum.$let.1: Populating database."
+
+ set db [eval {berkdb_open -create -mode 0644\
+ -pagesize $pagesize} $dupopt $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ for { set j 0 } { $j < $i } { incr j } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i\
+ [pad_data $method $j$alphabet]}]
+ error_check_good put.$let,$i $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+ puts -nonewline "\t\tTest$tnum.$let.2: "
+ puts "Verifying duplicate counts."
+ $db sync
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ error_check_good count.$let,$i \
+ [$db count $i] $i
+ }
+
+ puts -nonewline "\t\tTest$tnum.$let.3: "
+ puts "Delete every other dup by cursor, verify counts."
+
+ # Delete every other item by cursor and check counts.
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+ set j 0
+
+ for { set ret [$c get -first]} { [llength $ret] > 0 } \
+ { set ret [$c get -next]} {
+ set key [lindex [lindex $ret 0] 0]
+ if { $key == $i } {
+ set data [lindex [lindex $ret 0 ] 1]
+ set num [string range $data 0 \
+ end-[string length $alphabet]]
+ if { [expr $num % 2] == 0 } {
+ error_check_good \
+ c_del [$c del] 0
+ incr j
+ }
+ if { $txnenv == 0 } {
+ error_check_good count.$let.$i-$j \
+ [$db count $i] [expr $i - $j]
+ }
+ }
+ }
+ error_check_good curs_close [$c close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good count.$let.$i-$j \
+ [$db count $i] [expr $i - $j]
+ }
+
+ puts -nonewline "\t\tTest$tnum.$let.4: "
+ puts "Delete all items by cursor, verify counts."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+ for { set ret [$c get -first]} { [llength $ret] > 0 } \
+ { set ret [$c get -next]} {
+ set key [lindex [lindex $ret 0] 0]
+ if { $key == $i } {
+ error_check_good c_del [$c del] 0
+ }
+ }
+ error_check_good curs_close [$c close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good db_count_zero [$db count $i] 0
+ }
+
+ puts -nonewline "\t\tTest$tnum.$let.5: "
+ puts "Add back one item, verify counts."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i\
+ [pad_data $method $alphabet]}]
+ error_check_good put.$let,$i $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good add_one [$db count $i] 1
+ }
+
+ puts -nonewline "\t\tTest$tnum.$let.6: "
+ puts "Delete remaining entries, verify counts."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good db_del [eval {$db del} $txn {$i}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good count.$let.$i [$db count $i] 0
+ }
+ error_check_good db_close.$let [$db close] 0
+ }
+}
diff --git a/db-4.8.30/test/test079.tcl b/db-4.8.30/test/test079.tcl
new file mode 100644
index 0000000..b1c31d1
--- /dev/null
+++ b/db-4.8.30/test/test079.tcl
@@ -0,0 +1,28 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test079
+# TEST Test of deletes in large trees. (test006 w/ sm. pagesize).
+# TEST
+# TEST Check that delete operations work in large btrees. 10000 entries
+# TEST and a pagesize of 512 push this out to a four-level btree, with a
+# TEST small fraction of the entries going on overflow pages.
+proc test079 { method {nentries 10000} {pagesize 512} {tnum "079"} \
+ {ndups 20} args} {
+ if { [ is_queueext $method ] == 1 } {
+ set method "queue";
+ lappend args "-extent" "20"
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ eval {test006 $method $nentries 1 $tnum $ndups -pagesize \
+ $pagesize} $args
+}
diff --git a/db-4.8.30/test/test081.tcl b/db-4.8.30/test/test081.tcl
new file mode 100644
index 0000000..0c7b490
--- /dev/null
+++ b/db-4.8.30/test/test081.tcl
@@ -0,0 +1,14 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test081
+# TEST Test off-page duplicates and overflow pages together with
+# TEST very large keys (key/data as file contents).
+proc test081 { method {ndups 13} {tnum "081"} args} {
+ source ./include.tcl
+
+ eval {test017 $method 1 $ndups $tnum} $args
+}
diff --git a/db-4.8.30/test/test082.tcl b/db-4.8.30/test/test082.tcl
new file mode 100644
index 0000000..e90057d
--- /dev/null
+++ b/db-4.8.30/test/test082.tcl
@@ -0,0 +1,13 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test082
+# TEST Test of DB_PREV_NODUP (uses test074).
+proc test082 { method {dir -prevnodup} {nitems 100} {tnum "082"} args} {
+ source ./include.tcl
+
+ eval {test074 $method $dir $nitems $tnum} $args
+}
diff --git a/db-4.8.30/test/test083.tcl b/db-4.8.30/test/test083.tcl
new file mode 100644
index 0000000..1763e58
--- /dev/null
+++ b/db-4.8.30/test/test083.tcl
@@ -0,0 +1,174 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test083
+# TEST Test of DB->key_range.
+proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} {
+ source ./include.tcl
+
+ global rand_init
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test083 $method ($args): Test of DB->key_range"
+ if { [is_btree $method] != 1 } {
+ puts "\tTest083: Skipping for method $method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test083: skipping for specific pagesizes"
+ return
+ }
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set testfile $testdir/test083.db
+ set env NULL
+ } else {
+ set testfile test083.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ # We assume that numbers will be at most six digits wide
+ error_check_bad maxitems_range [expr $maxitems > 999999] 1
+
+ # We want to test key_range on a variety of sizes of btree.
+ # Start at ten keys and work up to $maxitems keys, at each step
+ # multiplying the number of keys by $step.
+ for { set nitems 10 } { $nitems <= $maxitems }\
+ { set nitems [expr $nitems * $step] } {
+
+ puts "\tTest083.a: Opening new database"
+ if { $env != "NULL"} {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644} \
+ -pagesize $pgsz $omethod $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ t83_build $db $nitems $env $txnenv
+ t83_test $db $nitems $env $txnenv $args
+
+ error_check_good db_close [$db close] 0
+ }
+}
+
+proc t83_build { db nitems env txnenv } {
+ source ./include.tcl
+
+ puts "\tTest083.b: Populating database with $nitems keys"
+
+ set keylist {}
+ puts "\t\tTest083.b.1: Generating key list"
+ for { set i 0 } { $i < $nitems } { incr i } {
+ lappend keylist $i
+ }
+
+ # With randomly ordered insertions, the range of errors we
+ # get from key_range can be unpredictably high [#2134]. For now,
+ # just skip the randomization step.
+ #puts "\t\tTest083.b.2: Randomizing key list"
+ #set keylist [randomize_list $keylist]
+ #puts "\t\tTest083.b.3: Populating database with randomized keys"
+
+ puts "\t\tTest083.b.2: Populating database"
+ set data [repeat . 50]
+ set txn ""
+ foreach keynum $keylist {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {key[format %6d $keynum] $data}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+}
+
+proc t83_test { db nitems env txnenv args} {
+ # Look at the first key, then at keys about 1/4, 1/2, 3/4, and
+ # all the way through the database. Make sure the key_ranges
+ # aren't off by more than 10%.
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ } else {
+ set txn ""
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good dbc [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest083.c: Verifying ranges..."
+
+ # Wild guess. "Tolerance" tests how close the key is to
+ # its expected position. "Sumtol" tests the sum of the
+ # "less than", "equal to", and "more than", which is
+ # expected to be around 1.
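+	# Illustrative sketch (hypothetical key and numbers): keyrange
+	# returns three fractions of the keys sorting less than, equal to,
+	# and greater than the given key, e.g.
+	#	set r [$db keyrange $somekey]	;# e.g. {0.24 0.001 0.759}
+	#	roughly_equal [expr [lindex $r 0] + [lindex $r 1] + \
+	#	    [lindex $r 2]] 1 $sumtol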
+
+ if { [is_compressed $args] == 1 } {
+ set tolerance 0.5
+ set sumtol 0.3
+ } elseif { $nitems < 500 || [is_partitioned $args] } {
+ set tolerance 0.3
+ set sumtol 0.05
+ } elseif { $nitems > 500 } {
+ set tolerance 0.2
+ set sumtol 0.05
+ }
+
+ for { set i 0 } { $i < $nitems } \
+ { incr i [expr $nitems / [berkdb random_int 3 16]] } {
+ puts -nonewline "\t\t...key $i"
+ error_check_bad key0 [llength [set dbt [$dbc get -first]]] 0
+
+ for { set j 0 } { $j < $i } { incr j } {
+ error_check_bad key$j \
+ [llength [set dbt [$dbc get -next]]] 0
+ }
+
+ set ranges [$db keyrange [lindex [lindex $dbt 0] 0]]
+ #puts "ranges is $ranges"
+ error_check_good howmanyranges [llength $ranges] 3
+
+ set lessthan [lindex $ranges 0]
+ set morethan [lindex $ranges 2]
+
+ puts -nonewline " ... sum of ranges"
+ set rangesum [expr $lessthan + [lindex $ranges 1] + $morethan]
+ roughly_equal $rangesum 1 $sumtol
+
+ puts "... position of key."
+ roughly_equal $lessthan [expr $i * 1.0 / $nitems] $tolerance
+
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+}
+
+proc roughly_equal { a b tol } {
+ error_check_good "$a =~ $b" [expr abs($a - $b) < $tol] 1
+}
diff --git a/db-4.8.30/test/test084.tcl b/db-4.8.30/test/test084.tcl
new file mode 100644
index 0000000..e0856ce
--- /dev/null
+++ b/db-4.8.30/test/test084.tcl
@@ -0,0 +1,52 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test084
+# TEST Basic sanity test (test001) with large (64K) pages.
+proc test084 { method {nentries 10000} {tnum "084"} {pagesize 65536} args} {
+ source ./include.tcl
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum-empty.db
+ set env NULL
+ } else {
+ set testfile test$tnum-empty.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test084: skipping for specific pagesizes"
+ return
+ }
+
+ cleanup $testdir $env
+
+ set args "-pagesize $pagesize $args"
+
+ eval {test001 $method $nentries 0 0 $tnum} $args
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ # For good measure, create a second database that's empty
+ # with the large page size. (There was a verifier bug that
+ # choked on empty 64K pages. [#2408])
+ set db [eval {berkdb_open -create -mode 0644} $args $omethod $testfile]
+ error_check_good empty_db [is_valid_db $db] TRUE
+ error_check_good empty_db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test085.tcl b/db-4.8.30/test/test085.tcl
new file mode 100644
index 0000000..fc09a26
--- /dev/null
+++ b/db-4.8.30/test/test085.tcl
@@ -0,0 +1,340 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test085
+# TEST Test of cursor behavior when a cursor is pointing to a deleted
+# TEST btree key which then has duplicates added. [#2473]
+proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum "085"} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test085: skipping for specific pagesizes"
+ return
+ }
+ if { [is_partition_callback $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+ cleanup $testdir $env
+
+ # Keys must sort $prekey < $key < $postkey.
+ set prekey "AA"
+ set key "BBB"
+ set postkey "CCCC"
+
+ # Make these distinguishable from each other and from the
+ # alphabets used for the $key's data.
+ set predatum "1234567890"
+ set datum $alphabet
+ set postdatum "0987654321"
+ set txn ""
+
+ append args " -pagesize $pagesize -dup"
+
+ puts -nonewline "Test$tnum $omethod ($args): "
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test$tnum skipping for btree with compression."
+ return
+ }
+ # Skip for all non-btrees. (Rbtrees don't count as btrees, for
+ # now, since they don't support dups.)
+ if { [is_btree $method] != 1 } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "Duplicates w/ deleted item cursor."
+ }
+
+ # Repeat the test with both on-page and off-page numbers of dups.
+ foreach ndups "$onp $offp" {
+ # Put operations we want to test on a cursor set to the
+ # deleted item, the key to use with them, and what should
+ # come before and after them given a placement of
+ # the deleted item at the beginning or end of the dupset.
+ set final [expr $ndups - 1]
+ set putops {
+ {{-before} "" $predatum {[test085_ddatum 0]} beginning}
+ {{-before} "" {[test085_ddatum $final]} $postdatum end}
+ {{-keyfirst} $key $predatum {[test085_ddatum 0]} beginning}
+ {{-keyfirst} $key $predatum {[test085_ddatum 0]} end}
+ {{-keylast} $key {[test085_ddatum $final]} $postdatum beginning}
+ {{-keylast} $key {[test085_ddatum $final]} $postdatum end}
+ {{-after} "" $predatum {[test085_ddatum 0]} beginning}
+ {{-after} "" {[test085_ddatum $final]} $postdatum end}
+ }
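+		# Reading one putops entry (for illustration): in
+		# {{-before} "" $predatum {[test085_ddatum 0]} beginning},
+		# element 0 is the cursor put flag, element 1 the key argument
+		# (empty for -before/-after), elements 2 and 3 the data items
+		# expected just before and just after the new item, and
+		# element 4 says whether the deleted item sits at the
+		# beginning or the end of the dup set.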
+
+ # Get operations we want to test on a cursor set to the
+ # deleted item, any args to get, and the expected key/data pair.
+ set getops {
+ {{-current} "" "" "" beginning}
+ {{-current} "" "" "" end}
+ {{-next} "" $key {[test085_ddatum 0]} beginning}
+ {{-next} "" $postkey $postdatum end}
+ {{-prev} "" $prekey $predatum beginning}
+ {{-prev} "" $key {[test085_ddatum $final]} end}
+ {{-first} "" $prekey $predatum beginning}
+ {{-first} "" $prekey $predatum end}
+ {{-last} "" $postkey $postdatum beginning}
+ {{-last} "" $postkey $postdatum end}
+ {{-nextdup} "" $key {[test085_ddatum 0]} beginning}
+ {{-nextdup} "" EMPTYLIST "" end}
+ {{-nextnodup} "" $postkey $postdatum beginning}
+ {{-nextnodup} "" $postkey $postdatum end}
+ {{-prevnodup} "" $prekey $predatum beginning}
+ {{-prevnodup} "" $prekey $predatum end}
+ }
+
+ set txn ""
+ foreach pair $getops {
+ set op [lindex $pair 0]
+ puts "\tTest$tnum: Get ($op) with $ndups duplicates,\
+ cursor at the [lindex $pair 4]."
+ set db [eval {berkdb_open -create \
+ -mode 0644} $omethod $encargs $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [test085_setup $db $txn]
+
+ set beginning [expr [string compare \
+ [lindex $pair 4] "beginning"] == 0]
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $beginning } {
+ error_check_good db_put($i) \
+ [eval {$db put} $txn \
+ {$key [test085_ddatum $i]}] 0
+ } else {
+ set c [eval {$db cursor} $txn]
+ set j [expr $ndups - $i - 1]
+ error_check_good db_cursor($j) \
+ [is_valid_cursor $c $db] TRUE
+ set d [test085_ddatum $j]
+ error_check_good dbc_put($j) \
+ [$c put -keyfirst $key $d] 0
+ error_check_good c_close [$c close] 0
+ }
+ }
+
+ set gargs [lindex $pair 1]
+ set ekey ""
+ set edata ""
+ eval set ekey [lindex $pair 2]
+ eval set edata [lindex $pair 3]
+
+ set dbt [eval $dbc get $op $gargs]
+ if { [string compare $ekey EMPTYLIST] == 0 || \
+ [string compare $op -current] == 0 } {
+ error_check_good dbt($op,$ndups) \
+ [llength $dbt] 0
+ } else {
+ error_check_good dbt($op,$ndups) $dbt \
+ [list [list $ekey $edata]]
+ }
+ error_check_good "dbc close" [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+ verify_dir $testdir "\t\t" 0 0 $nodump
+
+ # Remove testfile so we can do without truncate flag.
+ # This is okay because we've already done verify and
+ # dump/load.
+ if { $env == "NULL" } {
+ set ret [eval {berkdb dbremove} \
+ $encargs $testfile]
+ } elseif { $txnenv == 1 } {
+ set ret [eval "$env dbremove" \
+ -auto_commit $encargs $testfile]
+ } else {
+ set ret [eval {berkdb dbremove} \
+ -env $env $encargs $testfile]
+ }
+ error_check_good dbremove $ret 0
+
+ }
+
+ foreach pair $putops {
+ # Open and set up database.
+ set op [lindex $pair 0]
+ puts "\tTest$tnum: Put ($op) with $ndups duplicates,\
+ cursor at the [lindex $pair 4]."
+ set db [eval {berkdb_open -create \
+ -mode 0644} $omethod $args $encargs $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ set beginning [expr [string compare \
+ [lindex $pair 4] "beginning"] == 0]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [test085_setup $db $txn]
+
+ # Put duplicates.
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $beginning } {
+ error_check_good db_put($i) \
+ [eval {$db put} $txn \
+ {$key [test085_ddatum $i]}] 0
+ } else {
+ set c [eval {$db cursor} $txn]
+ set j [expr $ndups - $i - 1]
+ error_check_good db_cursor($j) \
+ [is_valid_cursor $c $db] TRUE
+ set d [test085_ddatum $j]
+ error_check_good dbc_put($j) \
+ [$c put -keyfirst $key $d] 0
+ error_check_good c_close [$c close] 0
+ }
+ }
+
+ # Set up cursors for stability test.
+ set pre_dbc [eval {$db cursor} $txn]
+ error_check_good pre_set [$pre_dbc get -set $prekey] \
+ [list [list $prekey $predatum]]
+ set post_dbc [eval {$db cursor} $txn]
+ error_check_good post_set [$post_dbc get -set $postkey]\
+ [list [list $postkey $postdatum]]
+ set first_dbc [eval {$db cursor} $txn]
+ error_check_good first_set \
+ [$first_dbc get -get_both $key [test085_ddatum 0]] \
+ [list [list $key [test085_ddatum 0]]]
+ set last_dbc [eval {$db cursor} $txn]
+ error_check_good last_set \
+ [$last_dbc get -get_both $key [test085_ddatum \
+ [expr $ndups - 1]]] \
+ [list [list $key [test085_ddatum [expr $ndups -1]]]]
+
+ set k [lindex $pair 1]
+ set d_before ""
+ set d_after ""
+ eval set d_before [lindex $pair 2]
+ eval set d_after [lindex $pair 3]
+ set newdatum "NewDatum"
+ error_check_good dbc_put($op,$ndups) \
+ [eval $dbc put $op $k $newdatum] 0
+ error_check_good dbc_prev($op,$ndups) \
+ [lindex [lindex [$dbc get -prev] 0] 1] \
+ $d_before
+ error_check_good dbc_current($op,$ndups) \
+ [lindex [lindex [$dbc get -next] 0] 1] \
+ $newdatum
+
+ error_check_good dbc_next($op,$ndups) \
+ [lindex [lindex [$dbc get -next] 0] 1] \
+ $d_after
+
+ # Verify stability of pre- and post- cursors.
+ error_check_good pre_stable [$pre_dbc get -current] \
+ [list [list $prekey $predatum]]
+ error_check_good post_stable [$post_dbc get -current] \
+ [list [list $postkey $postdatum]]
+ error_check_good first_stable \
+ [$first_dbc get -current] \
+ [list [list $key [test085_ddatum 0]]]
+ error_check_good last_stable \
+ [$last_dbc get -current] \
+ [list [list $key [test085_ddatum [expr $ndups -1]]]]
+
+ foreach c "$pre_dbc $post_dbc $first_dbc $last_dbc" {
+ error_check_good ${c}_close [$c close] 0
+ }
+
+ error_check_good "dbc close" [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+ verify_dir $testdir "\t\t" 0 0 $nodump
+
+ # Remove testfile so we can do without truncate flag.
+ # This is okay because we've already done verify and
+ # dump/load.
+ if { $env == "NULL" } {
+ set ret [eval {berkdb dbremove} \
+ $encargs $testfile]
+ } elseif { $txnenv == 1 } {
+ set ret [eval "$env dbremove" \
+ -auto_commit $encargs $testfile]
+ } else {
+ set ret [eval {berkdb dbremove} \
+ -env $env $encargs $testfile]
+ }
+ error_check_good dbremove $ret 0
+ }
+ }
+}
+
+# Set up the test database; put $prekey, $key, and $postkey with their
+# respective data, and then delete $key with a new cursor. Return that
+# cursor, still pointing to the deleted item.
+proc test085_setup { db txn } {
+ upvar key key
+ upvar prekey prekey
+ upvar postkey postkey
+ upvar predatum predatum
+ upvar postdatum postdatum
+
+ # no one else should ever see this one!
+ set datum "bbbbbbbb"
+
+ error_check_good pre_put [eval {$db put} $txn {$prekey $predatum}] 0
+ error_check_good main_put [eval {$db put} $txn {$key $datum}] 0
+ error_check_good post_put [eval {$db put} $txn {$postkey $postdatum}] 0
+
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good dbc_getset [$dbc get -get_both $key $datum] \
+ [list [list $key $datum]]
+
+ error_check_good dbc_del [$dbc del] 0
+
+ return $dbc
+}
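+
+# Illustrative usage (a sketch; the handle name is whatever the caller
+# uses): the cursor returned by test085_setup is parked on the deleted
+# $key item, so an immediate
+#	$dbc get -current
+# returns an empty list, which is what the {-current} entries in the
+# getops table above expect.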
+
+proc test085_ddatum { a } {
+ global alphabet
+ return $a$alphabet
+}
diff --git a/db-4.8.30/test/test086.tcl b/db-4.8.30/test/test086.tcl
new file mode 100644
index 0000000..16ed5fd
--- /dev/null
+++ b/db-4.8.30/test/test086.tcl
@@ -0,0 +1,168 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test086
+# TEST Test of cursor stability across btree splits/rsplits with
+# TEST subtransaction aborts (a variant of test048). [#2373]
+proc test086 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set tnum 086
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set pageargs ""
+ split_pageargs $args pageargs
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tnum skipping for method $method."
+ return
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tnum: Test of cursor stability across aborted\
+ btree splits."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then this test won't work.
+ if { $eindex == -1 } {
+ # But we will be using our own env...
+ set testfile test$tnum.db
+ } else {
+ puts "\tTest$tnum: Environment provided; skipping test."
+ return
+ }
+ set t1 $testdir/t1
+ env_cleanup $testdir
+
+ set env [eval \
+ {berkdb_env -create -home $testdir -txn} $pageargs $encargs]
+ error_check_good berkdb_env [is_valid_env $env] TRUE
+
+ puts "\tTest$tnum.a: Create $method database."
+ set oflags "-auto_commit -create -env $env -mode 0644 $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 5
+ # Fill page w/ small key/data pairs, keep at leaf
+ #
+ puts "\tTest$tnum.b: Fill page with $nkeys small key/data pairs."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ set ret [$db put -txn $txn key000$i $data$i]
+ error_check_good dbput $ret 0
+ }
+ error_check_good commit [$txn commit] 0
+
+ # get db ordering, set cursors
+ puts "\tTest$tnum.c: Set cursors on each of $nkeys pairs."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for {set i 0; set ret [$db get -txn $txn key000$i]} {\
+ $i < $nkeys && [llength $ret] != 0} {\
+ incr i; set ret [$db get -txn $txn key000$i]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ set dbc [$db cursor -txn $txn]
+ set dbc_set($i) $dbc
+ error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
+ set ret [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc_set($i)_get:set [llength $ret] 0
+ }
+
+ # Create child txn.
+ set ctxn [$env txn -parent $txn]
+	error_check_good ctxn [is_valid_txn $ctxn $env] TRUE
+
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 1000
+ puts "\tTest$tnum.d: Add $mkeys pairs to force split."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ set ret [$db put -txn $ctxn key0$i $data$i]
+ } elseif { $i >= 10 } {
+ set ret [$db put -txn $ctxn key00$i $data$i]
+ } else {
+ set ret [$db put -txn $ctxn key000$i $data$i]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tnum.e: Abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ puts "\tTest$tnum.f: Check and see that cursors maintained reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ # Put (and this time keep) the keys that caused the split.
+ # We'll delete them to test reverse splits.
+ puts "\tTest$tnum.g: Put back added keys."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ set ret [$db put -txn $txn key0$i $data$i]
+ } elseif { $i >= 10 } {
+ set ret [$db put -txn $txn key00$i $data$i]
+ } else {
+ set ret [$db put -txn $txn key000$i $data$i]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tnum.h: Delete added keys to force reverse split."
+ set ctxn [$env txn -parent $txn]
+	error_check_good ctxn [is_valid_txn $ctxn $env] TRUE
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ error_check_good db_del:$i [$db del -txn $ctxn key0$i] 0
+ } elseif { $i >= 10 } {
+ error_check_good db_del:$i \
+ [$db del -txn $ctxn key00$i] 0
+ } else {
+ error_check_good db_del:$i \
+ [$db del -txn $ctxn key000$i] 0
+ }
+ }
+
+ puts "\tTest$tnum.i: Abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ puts "\tTest$tnum.j: Verify cursor reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tnum.j: Cleanup."
+ # close cursors
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good dbc_close:$i [$dbc_set($i) close] 0
+ }
+
+ error_check_good commit [$txn commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ puts "\tTest$tnum complete."
+}
diff --git a/db-4.8.30/test/test087.tcl b/db-4.8.30/test/test087.tcl
new file mode 100644
index 0000000..e7557b7
--- /dev/null
+++ b/db-4.8.30/test/test087.tcl
@@ -0,0 +1,293 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test087
+# TEST Test of cursor stability when converting to and modifying
+# TEST off-page duplicate pages with subtransaction aborts. [#2373]
+# TEST
+# TEST Does the following:
+# TEST a. Initialize things by DB->putting ndups dups and
+# TEST setting a reference cursor to point to each. Do each put twice,
+# TEST first aborting, then committing, so we're sure to abort the move
+# TEST to off-page dups at some point.
+# TEST b. c_put ndups dups (and correspondingly expanding
+# TEST the set of reference cursors) after the last one, making sure
+# TEST after each step that all the reference cursors still point to
+# TEST the right item.
+# TEST c. Ditto, but before the first one.
+# TEST d. Ditto, but after each one in sequence first to last.
+# TEST	e. Ditto, but before each one in sequence from last to first
+# TEST	(checking in each case where the existing items occur relative
+# TEST	to the new datum).
+# TEST f. Ditto for the two sequence tests, only doing a
+# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+# TEST new one.
+proc test087 { method {pagesize 512} {ndups 50} {tnum "087"} args } {
+ source ./include.tcl
+ global alphabet
+
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test$tnum $omethod ($args): "
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then return
+ if { $eindex != -1 } {
+ puts "Environment specified; skipping."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test087: skipping for specific pagesizes"
+ return
+ }
+ env_cleanup $testdir
+ set testfile test$tnum.db
+ set key "the key"
+ append args " -pagesize $pagesize -dup"
+
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ } elseif { [is_compressed $args] == 1 } {
+ puts "Test$tnum skipping for btree with compression."
+ return
+ } else {
+ puts "Test$tnum: Cursor stability on dup. pages w/ aborts."
+ }
+
+ set env [eval {berkdb_env \
+ -create -home $testdir -txn -pagesize $pagesize} $encargs]
+ error_check_good env_create [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -auto_commit \
+ -create -env $env -mode 0644} $omethod $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ # Number of outstanding keys.
+ set keys $ndups
+
+ puts "\tTest$tnum.a: put/abort/put/commit loop;\
+ $ndups dups, short data."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn(abort,$i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good "db put/abort ($i)" \
+ [$db put -txn $ctxn $key $datum] 0
+ error_check_good ctxn_abort($i) [$ctxn abort] 0
+
+ verify_t73 is_long dbc [expr $i - 1] $key
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn(commit,$i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good "db put/commit ($i)" \
+ [$db put -txn $ctxn $key $datum] 0
+ error_check_good ctxn_commit($i) [$ctxn commit] 0
+
+ set is_long($i) 0
+
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+
+ verify_t73 is_long dbc $i $key
+ }
+
+ puts "\tTest$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\
+ short data."
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYLAST, $keys)"\
+ [$curs put -keylast $key $datum] 0
+
+ # We can't do a verification while a child txn is active,
+ # or we'll run into trouble when DEBUG_ROP is enabled.
+ # If this test has trouble, though, uncommenting this
+ # might be illuminating--it makes things a bit more rigorous
+ # and works fine when DEBUG_ROP is not enabled.
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest$tnum.c: Cursor put (DB_KEYFIRST); $ndups new dups,\
+ short data."
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYFIRST, $keys)"\
+ [$curs put -keyfirst $key $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+	# verify_t73 is_long dbc $keys $key
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest$tnum.d: Cursor put (DB_AFTER) first to last;\
+ $keys new dups, short data"
+ # We want to add a datum after each key from 0 to the current
+ # value of $keys, which we thus need to save.
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy after.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_AFTER, $i)"\
+ [$curs put -after $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest$tnum.e: Cursor put (DB_BEFORE) last to first;\
+ $keys new dups, short data"
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy before.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_BEFORE, $i)"\
+ [$curs put -before $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest$tnum.f: Cursor put (DB_CURRENT), first to last,\
+ growing $keys data."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set olddatum [makedatum_t73 $i 0]
+ set newdatum [makedatum_t73 $i 1]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $olddatum]\
+ [list [list $key $olddatum]]
+ error_check_good "c_put(DB_CURRENT, $i)"\
+ [$curs put -current $newdatum] 0
+
+ set is_long($i) 1
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set is_long($i) 0
+ }
+ verify_t73 is_long dbc $keys $key
+
+ # Now delete the first item, abort the deletion, and make sure
+ # we're still sane.
+ puts "\tTest$tnum.g: Cursor delete first item, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 0 0]
+ error_check_good "c_get(DB_GET_BOTH, 0)"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+ error_check_good "c_del(0)" [$curs del] 0
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Ditto, for the last item.
+ puts "\tTest$tnum.h: Cursor delete last item, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 [expr $keys - 1] 0]
+ error_check_good "c_get(DB_GET_BOTH, [expr $keys - 1])"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+ error_check_good "c_del(0)" [$curs del] 0
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Ditto, for all the items.
+ puts "\tTest$tnum.i: Cursor delete all items, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 0 0]
+ error_check_good "c_get(DB_GET_BOTH, 0)"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+ error_check_good "c_del(0)" [$curs del] 0
+ for { set i 1 } { $i < $keys } { incr i } {
+ error_check_good "c_get(DB_NEXT, $i)"\
+ [$curs get -next] [list [list $key [makedatum_t73 $i 0]]]
+ error_check_good "c_del($i)" [$curs del] 0
+ }
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Close cursors.
+ puts "\tTest$tnum.j: Closing cursors."
+ for { set i 0 } { $i < $keys } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good "db close" [$db close] 0
+ error_check_good "env close" [$env close] 0
+}
diff --git a/db-4.8.30/test/test088.tcl b/db-4.8.30/test/test088.tcl
new file mode 100644
index 0000000..cd2e463
--- /dev/null
+++ b/db-4.8.30/test/test088.tcl
@@ -0,0 +1,176 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test088
+# TEST Test of cursor stability across btree splits with very
+# TEST deep trees (a variant of test048). [#2514]
+proc test088 { method args } {
+ source ./include.tcl
+ global alphabet
+ global errorCode
+ global is_je_test
+
+ set tstn 088
+ set args [convert_args $method $args]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test088: skipping for specific pagesizes"
+ return
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of cursor stability across btree splits."
+
+ set key "key$alphabet$alphabet$alphabet"
+ set data "data$alphabet$alphabet$alphabet"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn.a: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tstn.db
+ set env NULL
+ } else {
+ set testfile test$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set ps 512
+ set txn ""
+ set oflags "-create -pagesize $ps -mode 0644 $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 5
+ # Fill page w/ key/data pairs.
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {${key}00000$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # get db ordering, set cursors
+ puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+	# if mkeys goes above 100000, the key padding below must be extended
+	# to preserve lexical order
+ set mkeys 30000
+ if { [is_compressed $args] } {
+ set mkeys 300
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set mkeys 300
+ }
+ for {set i 0; set ret [$db get ${key}00000$i]} {\
+ $i < $nkeys && [llength $ret] != 0} {\
+ incr i; set ret [$db get ${key}00000$i]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ set dbc [eval {$db cursor} $txn]
+ set dbc_set($i) $dbc
+ error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
+ set ret [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc_set($i)_get:set [llength $ret] 0
+ }
+
+ puts "\tTest$tstn.d: Add $mkeys pairs to force splits."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 10000 } {
+ set ret [eval {$db put} $txn {${key}0$i $data$i}]
+ } elseif { $i >= 1000 } {
+ set ret [eval {$db put} $txn {${key}00$i $data$i}]
+ } elseif { $i >= 100 } {
+ set ret [eval {$db put} $txn {${key}000$i $data$i}]
+ } elseif { $i >= 10 } {
+ set ret [eval {$db put} $txn {${key}0000$i $data$i}]
+ } else {
+ set ret [eval {$db put} $txn {${key}00000$i $data$i}]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.e: Make sure splits happened."
+ # XXX cannot execute stat in presence of txns and cursors.
+ if { $txnenv == 0 && !$is_je_test } {
+ error_check_bad stat:check-split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+ }
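+	# For reference (the value shown is hypothetical): [$db stat] returns
+	# a list of {name value} pairs, so seeing something like
+	# {{Internal pages} 3} rather than {{Internal pages} 0} indicates
+	# the tree really did split.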
+
+ puts "\tTest$tstn.f: Check to see that cursors maintained reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.g: Delete added keys to force reverse splits."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 10000 } {
+ set ret [eval {$db del} $txn {${key}0$i}]
+ } elseif { $i >= 1000 } {
+ set ret [eval {$db del} $txn {${key}00$i}]
+ } elseif { $i >= 100 } {
+ set ret [eval {$db del} $txn {${key}000$i}]
+ } elseif { $i >= 10 } {
+ set ret [eval {$db del} $txn {${key}0000$i}]
+ } else {
+ set ret [eval {$db del} $txn {${key}00000$i}]
+ }
+		error_check_good dbdel:more $ret 0
+ }
+
+ puts "\tTest$tstn.h: Verify cursor reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.i: Cleanup."
+ # close cursors
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good dbc_close:$i [$dbc_set($i) close] 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/db-4.8.30/test/test089.tcl b/db-4.8.30/test/test089.tcl
new file mode 100644
index 0000000..ebe5b95
--- /dev/null
+++ b/db-4.8.30/test/test089.tcl
@@ -0,0 +1,275 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test089
+# TEST Concurrent Data Store test (CDB)
+# TEST
+# TEST Enhanced CDB testing to test off-page dups, cursor dups and
+# TEST cursor operations like c_del then c_get.
+proc test089 { method {nentries 1000} args } {
+ global datastr
+ global encrypt
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test089 skipping for env $env"
+ return
+ }
+ set encargs ""
+ set args [convert_args $method $args]
+ set oargs [split_encargs $args encargs]
+ set omethod [convert_method $method]
+ set pageargs ""
+ split_pageargs $args pageargs
+
+ puts "Test089: ($oargs) $method CDB Test cursor/dup operations"
+
+ # Process arguments
+ # Create the database and open the dictionary
+ set testfile test089.db
+ set testfile1 test089a.db
+
+ env_cleanup $testdir
+
+ set env [eval \
+ {berkdb_env -create -cdb} $pageargs $encargs -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set db1 [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile1}]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest089.a: Put loop"
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db $ret 0
+ set ret [eval {$db1 put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db1 $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+ error_check_good close:$db1 [$db1 close] 0
+
+ # Database is created, now set up environment
+
+ # Remove old mpools and Open/create the lock and mpool regions
+ error_check_good env:close:$env [$env close] 0
+ set ret [eval {berkdb envremove} $encargs -home $testdir]
+ error_check_good env_remove $ret 0
+
+ set env [eval \
+ {berkdb_env_noerr -create -cdb} $pageargs $encargs -home $testdir]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ puts "\tTest089.b: CDB cursor dups"
+
+ set db1 [eval {berkdb_open_noerr -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile1}]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ # Create a read-only cursor and make sure we can't write with it.
+ set dbcr [$db1 cursor]
+ error_check_good dbcursor [is_valid_cursor $dbcr $db1] TRUE
+ set ret [$dbcr get -first]
+ catch { [$dbcr put -current data] } ret
+ error_check_good is_read_only \
+ [is_substr $ret "Write attempted on read-only cursor"] 1
+ error_check_good dbcr_close [$dbcr close] 0
+
+ # Create a write cursor and duplicate it.
+ set dbcw [$db1 cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbcw $db1] TRUE
+ set dup_dbcw [$dbcw dup]
+ error_check_good dup_write_cursor [is_valid_cursor $dup_dbcw $db1] TRUE
+
+ # Position both cursors at get -first. They should find the same data.
+ set get_first [$dbcw get -first]
+ set get_first_dup [$dup_dbcw get -first]
+ error_check_good dup_read $get_first $get_first_dup
+
+ # Test that the write cursors can both write and that they
+ # read each other's writes correctly. First write reversed
+ # datastr with original cursor and read with dup cursor.
+ error_check_good put_current_orig \
+ [$dbcw put -current [chop_data $method [reverse $datastr]]] 0
+ set reversed [$dup_dbcw get -current]
+ error_check_good check_with_dup [lindex [lindex $reversed 0] 1] \
+ [pad_data $method [reverse $datastr]]
+
+ # Write forward datastr with dup cursor and read with original.
+ error_check_good put_current_dup \
+ [$dup_dbcw put -current [chop_data $method $datastr]] 0
+ set forward [$dbcw get -current]
+ error_check_good check_with_orig $forward $get_first
+
+ error_check_good dbcw_close [$dbcw close] 0
+ error_check_good dup_dbcw_close [$dup_dbcw close] 0
+
+ # This tests the failure found in #1923
+ puts "\tTest089.c: Test delete then get"
+
+ set dbc [$db1 cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db1] TRUE
+
+ for {set kd [$dbc get -first] } { [llength $kd] != 0 } \
+ {set kd [$dbc get -next] } {
+ error_check_good dbcdel [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db1 close] 0
+ error_check_good env_close [$env close] 0
+
+ if { [is_btree $method] != 1 } {
+ puts "Skipping rest of test089 for $method method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Skipping rest of test089 for specific pagesizes"
+ return
+ }
+
+ append oargs " -dup "
+ # Skip unsorted duplicates for btree with compression.
+ if { [is_compressed $args] == 0 } {
+ test089_dup $testdir $encargs $oargs $omethod $nentries
+ }
+
+ append oargs " -dupsort "
+ test089_dup $testdir $encargs $oargs $omethod $nentries
+}
+
+proc test089_dup { testdir encargs oargs method nentries } {
+ env_cleanup $testdir
+ set pageargs ""
+ split_pageargs $oargs pageargs
+ set env [eval \
+ {berkdb_env -create -cdb} $encargs $pageargs -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ #
+ # Set pagesize small to generate lots of off-page dups
+ #
+ set page 512
+ set nkeys 5
+ set data "data"
+ set key "test089_key"
+ set testfile test089.db
+ puts "\tTest089.d: CDB ($oargs) off-page dups"
+ set oflags "-env $env -create -mode 0644 $oargs $method"
+ set db [eval {berkdb_open} -pagesize $page $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest089.e: Fill page with $nkeys keys, with $nentries dups"
+ for { set k 0 } { $k < $nkeys } { incr k } {
+ for { set i 0 } { $i < $nentries } { incr i } {
+ set ret [$db put $key$k $i$data$k]
+ error_check_good dbput $ret 0
+ }
+ }
+
+ # Verify we have off-page duplicates
+ set stat [$db stat]
+ error_check_bad stat:offpage [is_substr $stat "{{Internal pages} 0}"] 1
+
+ # This tests the failure reported in #6950. Skip for -dupsort.
+ puts "\tTest089.f: Clear locks for duped off-page dup cursors."
+ if { [is_substr $oargs dupsort] != 1 } {
+ # Create a read cursor, put it on an off-page dup.
+ set dbcr [$db cursor]
+ error_check_good dbcr [is_valid_cursor $dbcr $db] TRUE
+ set offpage [$dbcr get -get_both test089_key4 900data4]
+ error_check_bad offpage [llength $offpage] 0
+
+ # Create a write cursor, put it on an off-page dup.
+ set dbcw [$db cursor -update]
+ error_check_good dbcw [is_valid_cursor $dbcw $db] TRUE
+ set offpage [$dbcw get -get_both test089_key3 900data3]
+ error_check_bad offpage [llength $offpage] 0
+
+ # Add a new item using the write cursor, then close the cursor.
+ error_check_good add_dup [$dbcw put -after $data] 0
+ error_check_good close_dbcw [$dbcw close] 0
+
+ # Get next dup with read cursor, then close the cursor.
+ set nextdup [$dbcr get -nextdup]
+ error_check_good close_dbcr [$dbcr close] 0
+ }
+
+ puts "\tTest089.g: CDB duplicate write cursors with off-page dups"
+ # Create a write cursor and duplicate it.
+ set dbcw [$db cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbcw $db] TRUE
+ set dup_dbcw [$dbcw dup]
+ error_check_good dup_write_cursor [is_valid_cursor $dup_dbcw $db] TRUE
+
+ # Position both cursors at get -first. They should find the same data.
+ set get_first [$dbcw get -first]
+ set get_first_dup [$dup_dbcw get -first]
+ error_check_good dup_read $get_first $get_first_dup
+
+ # Test with -after and -before. Skip for -dupsort.
+ if { [is_substr $oargs dupsort] != 1 } {
+ # Original and duplicate cursors both point to first item.
+ # Do a put -before of new string with original cursor,
+ # and a put -after of new string with duplicate cursor.
+ set newdata "newdata"
+ error_check_good put_before [$dbcw put -before $newdata] 0
+ error_check_good put_after [$dup_dbcw put -after $newdata] 0
+
+ # Now walk forward with original cursor ...
+ set first [$dbcw get -first]
+ error_check_good check_first [lindex [lindex $first 0] 1] $newdata
+ set next1 [$dbcw get -next]
+ error_check_good check_next1 $next1 $get_first
+ set next2 [$dbcw get -next]
+ error_check_good check_next2 [lindex [lindex $next2 0] 1] $newdata
+
+ # ... and backward with duplicate cursor.
+ set current [$dup_dbcw get -current]
+ error_check_good check_current [lindex [lindex $current 0] 1] $newdata
+ set prev1 [$dup_dbcw get -prev]
+ error_check_good check_prev1 $prev1 $get_first
+ set prev2 [$dup_dbcw get -prev]
+ error_check_good check_prev2 [lindex [lindex $prev2 0] 1] $newdata
+ }
+
+ puts "\tTest089.h: test delete then get of off-page dups"
+ for {set kd [$dbcw get -first] } { [llength $kd] != 0 } \
+ {set kd [$dbcw get -next] } {
+ error_check_good dbcdel [$dbcw del] 0
+ }
+
+ error_check_good dbcw_close [$dbcw close] 0
+ error_check_good dup_dbcw_close [$dup_dbcw close] 0
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/db-4.8.30/test/test090.tcl b/db-4.8.30/test/test090.tcl
new file mode 100644
index 0000000..06c7a32
--- /dev/null
+++ b/db-4.8.30/test/test090.tcl
@@ -0,0 +1,15 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test090
+# TEST Test for functionality near the end of the queue using test001.
+proc test090 { method {nentries 10000} {tnum "090"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test$tnum for $method."
+ return;
+ }
+ eval {test001 $method $nentries 4294967000 0 $tnum} $args
+}
diff --git a/db-4.8.30/test/test091.tcl b/db-4.8.30/test/test091.tcl
new file mode 100644
index 0000000..10725fc
--- /dev/null
+++ b/db-4.8.30/test/test091.tcl
@@ -0,0 +1,19 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test091
+# TEST Test of DB_CONSUME_WAIT.
+proc test091 { method {nconsumers 4} \
+ {nproducers 2} {nitems 1000} {start 0 } {tnum "091"} args} {
+ if { [is_queue $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test070 $method \
+ $nconsumers $nproducers $nitems WAIT $start -txn $tnum } $args
+ eval {test070 $method \
+ $nconsumers $nproducers $nitems WAIT $start -cdb $tnum } $args
+}
diff --git a/db-4.8.30/test/test092.tcl b/db-4.8.30/test/test092.tcl
new file mode 100644
index 0000000..0dad2d6
--- /dev/null
+++ b/db-4.8.30/test/test092.tcl
@@ -0,0 +1,252 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test092
+# TEST Test of DB_DIRTY_READ [#3395]
+# TEST
+# TEST We set up a database with nentries in it. We then open the
+# TEST database read-only twice. One with dirty reads and one without.
+# TEST We open the database for writing and update some entries in it.
+# TEST Then read those new entries via db->get (clean and dirty), and
+# TEST via cursors (clean and dirty).
+proc test092 { method {nentries 1000} args } {
+ source ./include.tcl
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test092 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+ set pageargs ""
+ split_pageargs $args pageargs
+
+ puts "Test092: Dirty Read Test $method $nentries"
+
+ # Create the database and open the dictionary
+ set testfile test092.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ env_cleanup $testdir
+
+ set lmax [expr $nentries * 2]
+ set lomax [expr $nentries * 2]
+ set env [eval \
+ {berkdb_env -create -txn} $pageargs $encargs -home $testdir \
+ -lock_max_locks $lmax -lock_max_objects $lomax]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here is the loop where we put each key/data pair.
+ # Key is entry, data is entry also.
+ puts "\tTest092.a: put loop"
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} {$key [chop_data $method $str]}]
+ error_check_good put:$db $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+
+ puts "\tTest092.b: Opening all the handles"
+ #
+ # Open all of our handles.
+ # We need:
+ # 1. Our main txn (t).
+ # 2. A txn that can read dirty data (tdr).
+ # 3. A db handle for writing via txn (dbtxn).
+ # 4. A db handle for clean data (dbcl).
+ # 5. A db handle for dirty data (dbdr).
+ # 6. A cursor handle for dirty txn data (clean db handle using
+	#    the dirty txn handle on the cursor call) (dbccl).
+ # 7. A cursor handle for dirty data (dirty on get call) (dbcdr0).
+ # 8. A cursor handle for dirty data (dirty on cursor call) (dbcdr1).
+ set t [$env txn]
+ error_check_good txnbegin [is_valid_txn $t $env] TRUE
+
+ set tdr [$env txn -read_uncommitted]
+ error_check_good txnbegin:dr [is_valid_txn $tdr $env] TRUE
+ set dbtxn [eval {berkdb_open -auto_commit -env $env -read_uncommitted \
+ -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen:dbtxn [is_valid_db $dbtxn] TRUE
+
+ set dbcl [eval {berkdb_open -auto_commit -env $env \
+ -rdonly -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen:dbcl [is_valid_db $dbcl] TRUE
+
+ set dbdr [eval {berkdb_open -auto_commit -env $env -read_uncommitted \
+ -rdonly -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen:dbdr [is_valid_db $dbdr] TRUE
+
+ set dbccl [$dbcl cursor -txn $tdr]
+ error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE
+
+ set dbcdr0 [$dbdr cursor]
+ error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE
+
+ set dbcdr1 [$dbdr cursor -read_uncommitted]
+ error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE
+
+ # Test that $db stat can use -read_uncommitted flag.
+ puts "\tTest092.c: Smoke test for db_stat -txn -read_uncommitted"
+ if { [catch \
+ {set statret [$dbcl stat -txn $t -read_uncommitted]} res] } {
+ puts "FAIL: db_stat -txn -read_uncommitted returned $res"
+ }
+
+ #
+ # Now that we have all of our handles, change all the data in there
+ # to be the key and data the same, but data is capitalized.
+ puts "\tTest092.d: put/get data within a txn"
+ set gflags ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test092dr_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test092dr.check
+ }
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ustr [string toupper $str]
+ set clret [list [list $key [pad_data $method $str]]]
+ set drret [list [list $key [pad_data $method $ustr]]]
+ #
+ # Put the data in the txn.
+ #
+ set ret [eval {$dbtxn put} -txn $t \
+ {$key [chop_data $method $ustr]}]
+ error_check_good put:$dbtxn $ret 0
+
+ #
+ # Now get the data using the different db handles and
+ # make sure it is dirty or clean data.
+ #
+ # Using the dirty txn should show us dirty data
+ set ret [eval {$dbcl get -txn $tdr} $gflags {$key}]
+		error_check_good dbcl:get $ret $drret
+
+ set ret [eval {$dbdr get -read_uncommitted} $gflags {$key}]
+ error_check_good dbdr1:get $ret $drret
+
+ set ret [eval {$dbdr get -txn $tdr} $gflags {$key}]
+ error_check_good dbdr2:get $ret $drret
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest092.e: Check dirty data using dirty txn and clean db/cursor"
+ dump_file_walk $dbccl $t1 $checkfunc "-first" "-next"
+
+ puts "\tTest092.f: Check dirty data using -read_uncommitted cget flag"
+ dump_file_walk \
+ $dbcdr0 $t2 $checkfunc "-first" "-next" "-read_uncommitted"
+
+ puts "\tTest092.g: Check dirty data using -read_uncommitted cursor"
+ dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next"
+
+ #
+ # We must close these before aborting the real txn
+ # because they all hold read locks on the pages.
+ #
+ error_check_good dbccl:close [$dbccl close] 0
+ error_check_good dbcdr0:close [$dbcdr0 close] 0
+ error_check_good dbcdr1:close [$dbcdr1 close] 0
+
+ #
+ # Now abort the modifying transaction and rerun the data checks.
+ #
+ puts "\tTest092.h: Aborting the write-txn"
+ error_check_good txnabort [$t abort] 0
+
+ set dbccl [$dbcl cursor -txn $tdr]
+ error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE
+
+ set dbcdr0 [$dbdr cursor]
+ error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE
+
+ set dbcdr1 [$dbdr cursor -read_uncommitted]
+ error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test092cl_recno.check
+ } else {
+ set checkfunc test092cl.check
+ }
+	puts "\tTest092.i: Check clean data using dirty txn and clean db/cursor"
+ dump_file_walk $dbccl $t1 $checkfunc "-first" "-next"
+
+ puts "\tTest092.j: Check clean data using -read_uncommitted cget flag"
+ dump_file_walk \
+ $dbcdr0 $t2 $checkfunc "-first" "-next" "-read_uncommitted"
+
+ puts "\tTest092.k: Check clean data using -read_uncommitted cursor"
+ dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next"
+
+ # Clean up our handles
+ error_check_good dbccl:close [$dbccl close] 0
+ error_check_good tdrcommit [$tdr commit] 0
+ error_check_good dbcdr0:close [$dbcdr0 close] 0
+ error_check_good dbcdr1:close [$dbcdr1 close] 0
+ error_check_good dbclose [$dbcl close] 0
+ error_check_good dbclose [$dbdr close] 0
+ error_check_good dbclose [$dbtxn close] 0
+ error_check_good envclose [$env close] 0
+}
+
+# Check functions for test092.
+# Clean checks mean keys and data are identical.
+# Dirty checks mean data are uppercase versions of keys.
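+# For example (illustrative), for the dictionary word "apple" the clean
+# checks expect key "apple" with data "apple", while the dirty checks
+# expect key "apple" with data "APPLE" (the uncommitted, uppercased write).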
+proc test092cl.check { key data } {
+ error_check_good "key/data mismatch" $key $data
+}
+
+proc test092cl_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
+
+proc test092dr.check { key data } {
+ error_check_good "key/data mismatch" $key [string tolower $data]
+}
+
+proc test092dr_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data \
+ [string toupper $kvals($key)]
+}
+
diff --git a/db-4.8.30/test/test093.tcl b/db-4.8.30/test/test093.tcl
new file mode 100644
index 0000000..b6f7af9
--- /dev/null
+++ b/db-4.8.30/test/test093.tcl
@@ -0,0 +1,434 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test093
+# TEST Test set_bt_compare (btree key comparison function) and
+# TEST set_h_compare (hash key comparison function).
+# TEST
+# TEST Open a database with a comparison function specified,
+# TEST populate, and close, saving a list with that key order as
+# TEST we do so. Reopen and read in the keys, saving in another
+# TEST list; the keys should be in the order specified by the
+# TEST comparison function. Sort the original saved list of keys
+# TEST using the comparison function, and verify that it matches
+# TEST the keys as read out of the database.
+
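+# For example, a btree kept in reverse lexicographic key order might be
+# opened with a user-supplied comparison proc along these lines (the
+# proc and file names here are illustrative only):
+#
+#	proc mycmp { a b } {
+#		return [string compare $b $a]
+#	}
+#	set db [berkdb_open -btcompare mycmp -create -btree mydb.db]
+#
+# Every later open of the same database (including dbverify and the
+# read-only reopen) must supply the same comparison function, as
+# test093_run does below.
+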
+proc test093 { method {nentries 10000} {tnum "093"} args} {
+ source ./include.tcl
+
+ set dbargs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_btree $method] == 1 } {
+ set compflag -btcompare
+ } elseif { [is_hash $method] == 1 } {
+ set compflag -hashcompare
+ } else {
+ puts "Test$tnum: skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set envflags [$env get_open_flags]
+
+ # We can't run this test for the -thread option because
+ # the comparison function requires the ability to allocate
+ # memory at the DBT level and our Tcl interface does not
+ # offer that.
+ if { [lsearch -exact $envflags "-thread"] != -1 } {
+ puts "Skipping Test$tnum for threaded env"
+ return
+ }
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append dbargs " -auto_commit "
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ cleanup $testdir $env
+ } else {
+ set env NULL
+ }
+
+ puts "Test$tnum: $method ($args) $nentries entries using $compflag"
+
+ test093_run $omethod $dbargs $nentries $tnum \
+ $compflag test093_cmp1 test093_sort1
+ test093_runbig $omethod $dbargs $nentries $tnum \
+ $compflag test093_cmp1 test093_sort1
+ test093_run $omethod $dbargs $nentries $tnum \
+ $compflag test093_cmp2 test093_sort2
+
+ # Don't bother running the second, really slow, comparison
+ # function on test093_runbig (file contents).
+
+ # Clean up so general verification (without the custom comparison
+ # function) doesn't fail.
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+}
+
+proc test093_run { method dbargs nentries tnum compflag cmpfunc sortfunc } {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+
+ # We'll need any encryption args separated from the db args
+ # so we can pass them to dbverify.
+ set encargs ""
+ set dbargs [split_encargs $dbargs encargs]
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set eindex [lsearch -exact $dbargs "-env"]
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ set envargs ""
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open $compflag $cmpfunc \
+ -create -mode 0644} $method $encargs $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set txn ""
+
+ # Use btvals to save the order of the keys as they are
+ # written to the database. The btvalsck variable will contain
+ # the values as sorted by the comparison function.
+ set btvals {}
+ set btvalsck {}
+
+ puts "\tTest$tnum.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set str [reverse $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval \
+ {$db put} $txn {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ lappend btvals $key
+
+ set ret [eval {$db get $key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test093_check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Run verify to check the internal structure and order.
+ if { [catch {eval {berkdb dbverify} $compflag $cmpfunc\
+ $envargs $encargs {$testfile}} res] } {
+ error "FAIL: Verification failed with $res"
+ }
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filehead $nentries $dict $t2
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest$tnum.c: dump file in order"
+ # Now, reopen the file and run the last test again.
+ # We open it here, ourselves, because all uses of the db
+ # need to have the correct comparison func set. Then
+ # call dump_file_direction directly.
+ set btvalsck {}
+ set db [eval {berkdb_open $compflag $cmpfunc -rdonly} \
+ $dbargs $encargs $method $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file_direction $db $txn $t1 test093_check "-first" "-next"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { [is_hash $method] == 1 || [is_partition_callback $dbargs] == 1 } {
+ return
+ }
+
+ # We need to sort btvals according to the comparison function.
+ # Once that is done, btvalsck and btvals should be the same.
+ puts "\tTest$tnum.d: check file order"
+
+ $sortfunc
+
+ error_check_good btvals:len [llength $btvals] [llength $btvalsck]
+ for {set i 0} {$i < $nentries} {incr i} {
+ error_check_good vals:$i [lindex $btvals $i] \
+ [lindex $btvalsck $i]
+ }
+}
+
+proc test093_runbig { method dbargs nentries tnum compflag cmpfunc sortfunc } {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+
+ # We'll need any encryption args separated from the db args
+ # so we can pass them to dbverify.
+ set encargs ""
+ set dbargs [split_encargs $dbargs encargs]
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $dbargs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ set envargs ""
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open $compflag $cmpfunc \
+ -create -mode 0644} $method $encargs $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+ set t5 $testdir/t5
+ set txn ""
+ set btvals {}
+ set btvalsck {}
+ puts "\tTest$tnum.e:\
+ big key put/get loop key=filecontents data=filename"
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list 1]
+
+ set count 0
+ foreach f $file_list {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set key [read $fid]
+ close $fid
+
+ set key $f$key
+
+ set fcopy [open $t5 w]
+ fconfigure $fcopy -translation binary
+ puts -nonewline $fcopy $key
+ close $fcopy
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key \
+ [chop_data $method $f]}]
+ error_check_good put_file $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ lappend btvals $key
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ puts -nonewline $fid $key
+ }
+ close $fid
+ error_check_good \
+ Test093:diff($t5,$t4) [filecmp $t5 $t4] 0
+
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest$tnum.f: big dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test093_checkbig
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Run verify to check the internal structure and order.
+ if { [catch {eval {berkdb dbverify} $compflag $cmpfunc\
+ $envargs $encargs {$testfile}} res] } {
+ error "FAIL: Verification failed with $res"
+ }
+
+ puts "\tTest$tnum.g: dump file in order"
+ # Now, reopen the file and run the last test again.
+ # We open it here, ourselves, because all uses of the db
+ # need to have the correct comparison func set. Then
+ # call dump_file_direction directly.
+
+ set btvalsck {}
+ set db [eval {berkdb_open $compflag $cmpfunc -rdonly} \
+ $encargs $dbargs $method $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file_direction $db $txn $t1 test093_checkbig "-first" "-next"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { [is_hash $method] == 1 || [is_partition_callback $dbargs] == 1 } {
+ return
+ }
+
+ # We need to sort btvals according to the comparison function.
+ # Once that is done, btvalsck and btvals should be the same.
+ puts "\tTest$tnum.h: check file order"
+
+ $sortfunc
+ error_check_good btvals:len [llength $btvals] [llength $btvalsck]
+
+ set end [llength $btvals]
+ for {set i 0} {$i < $end} {incr i} {
+ error_check_good vals:$i [lindex $btvals $i] \
+ [lindex $btvalsck $i]
+ }
+}
+
+# Simple bt comparison.
+proc test093_cmp1 { a b } {
+ return [string compare $b $a]
+}
+
+# Simple bt sorting.
+proc test093_sort1 {} {
+ global btvals
+ #
+ # This one is easy, just sort in reverse.
+ #
+ set btvals [lsort -decreasing $btvals]
+}
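+
+# Since test093_cmp1 compares its arguments in reverse order, a btree
+# using it returns keys in descending lexicographic order; lsort
+# -decreasing reproduces that order on the saved key list, so the two
+# lists can be compared entry by entry. For example (illustrative):
+#	% lsort -decreasing {apple cherry banana}
+#	cherry banana apple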
+
+proc test093_cmp2 { a b } {
+ set arev [reverse $a]
+ set brev [reverse $b]
+ return [string compare $arev $brev]
+}
+
+proc test093_sort2 {} {
+ global btvals
+
+	# We have to reverse them, then sort them,
+	# then reverse them back to real words.
+ set rbtvals {}
+ foreach i $btvals {
+ lappend rbtvals [reverse $i]
+ }
+ set rbtvals [lsort -increasing $rbtvals]
+ set newbtvals {}
+ foreach i $rbtvals {
+ lappend newbtvals [reverse $i]
+ }
+ set btvals $newbtvals
+}
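+
+# test093_cmp2 orders keys by their reversed spelling, so test093_sort2
+# mimics it: reverse every key, sort the reversed strings, then reverse
+# them back. For example (illustrative), {cat dog cab} reverses to
+# {tac god bac}, sorts to {bac god tac}, and maps back to {cab dog cat}.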
+
+# Check function for test093; data should be the reverse of the key
+proc test093_check { key data } {
+ global btvalsck
+
+ error_check_good "key/data mismatch" $data [reverse $key]
+ lappend btvalsck $key
+}
+
+# Check function for test093 big keys.
+proc test093_checkbig { key data } {
+ source ./include.tcl
+ global btvalsck
+
+ set fid [open $data r]
+ fconfigure $fid -translation binary
+ set cont [read $fid]
+ close $fid
+ error_check_good "key/data mismatch" $key $data$cont
+ lappend btvalsck $key
+}
+
diff --git a/db-4.8.30/test/test094.tcl b/db-4.8.30/test/test094.tcl
new file mode 100644
index 0000000..aaad3aa
--- /dev/null
+++ b/db-4.8.30/test/test094.tcl
@@ -0,0 +1,206 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test094
+# TEST Test using set_dup_compare.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
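+#
+# For example, a database whose sorted duplicates use a custom order
+# might be opened along these lines (the proc and file names here are
+# illustrative only):
+#
+#	set db [berkdb_open -dupcompare mycmp -dup -dupsort \
+#	    -create -btree mydups.db]
+#
+# where mycmp takes two data items and returns <0, 0, or >0, as
+# test094_cmp does at the end of this file.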
+proc test094 { method {nentries 10000} {ndups 10} {tnum "094"} args} {
+ source ./include.tcl
+ global errorInfo
+
+ set dbargs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_btree $method] != 1 && [is_hash $method] != 1 } {
+ puts "Test$tnum: skipping for method $method."
+ return
+ }
+
+ # We'll need any encryption args separated from the db args
+ # so we can pass them to dbverify.
+ set encargs ""
+ set dbargs [split_encargs $dbargs encargs]
+
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum-a.db
+ set env NULL
+ set envargs ""
+ } else {
+ set testfile test$tnum-a.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set envargs " -env $env "
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append dbargs " -auto_commit "
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test$tnum: $method ($args) $nentries \
+ with $ndups dups using dupcompare"
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -dupcompare test094_cmp -dup -dupsort\
+ -create -mode 0644} $omethod $encargs $dbargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set t1 $testdir/t1
+ set pflags ""
+ set gflags ""
+ set txn ""
+ puts "\tTest$tnum.a: $nentries put/get duplicates loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ set dlist {}
+ for {set i 0} {$i < $ndups} {incr i} {
+ set dlist [linsert $dlist 0 $i]
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ for {set i 0} {$i < $ndups} {incr i} {
+ set data $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $omethod $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get [llength $ret] $ndups
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest$tnum.b: traverse checking duplicates before close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now run verify to check the internal structure and order.
+ if { [catch {eval {berkdb dbverify} -dupcompare test094_cmp\
+ $envargs $encargs {$testfile}} res] } {
+ puts "FAIL: Verification failed with $res"
+ }
+
+ # Set up second testfile so truncate flag is not needed.
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum-b.db
+ set env NULL
+ } else {
+ set testfile test$tnum-b.db
+ set env [lindex $dbargs $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ #
+ # Test dupcompare with data items big enough to force offpage dups.
+ #
+ puts "\tTest$tnum.c:\
+ big key put/get dup loop key=filename data=filecontents"
+ set db [eval {berkdb_open -dupcompare test094_cmp -dup -dupsort \
+ -create -mode 0644} $omethod $encargs $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list 1]
+ if { [llength $file_list] > $nentries } {
+ set file_list [lrange $file_list 1 $nentries]
+ }
+
+ set count 0
+ foreach f $file_list {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set cont [read $fid]
+ close $fid
+
+ set key $f
+ for {set i 0} {$i < $ndups} {incr i} {
+ set data $i:$cont
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $omethod $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get [llength $ret] $ndups
+ incr count
+ }
+
+ puts "\tTest$tnum.d: traverse checking duplicates before close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_file_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ set testdir [get_home $env]
+ }
+ error_check_good db_close [$db close] 0
+
+ # Run verify to check the internal structure and order.
+ if { [catch {eval {berkdb dbverify} -dupcompare test094_cmp\
+ $envargs $encargs {$testfile}} res] } {
+ puts "FAIL: Verification failed with $res"
+ }
+
+ # Clean up the test directory, otherwise the general verify
+ # (without dupcompare) will fail.
+ cleanup $testdir $env
+}
+
+# Simple dup comparison.
+proc test094_cmp { a b } {
+ return [string compare $b $a]
+}
diff --git a/db-4.8.30/test/test095.tcl b/db-4.8.30/test/test095.tcl
new file mode 100644
index 0000000..afd2817
--- /dev/null
+++ b/db-4.8.30/test/test095.tcl
@@ -0,0 +1,369 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test095
+# TEST Bulk get test for methods supporting dups. [#2934]
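+#
+# A bulk get hands DB a single buffer to fill with as many key/data
+# pairs as fit. In the Tcl API the buffer size follows the flag; the
+# calls exercised below look roughly like this (sizes illustrative):
+#
+#	$db get -multi 65536 $key
+#	$dbc get -multi_key 65536 -set $key
+#
+# The buffer must be a multiple of 1024, and one smaller than the page
+# size is rejected with EINVAL, which is what the small-buffer cases
+# below check.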
+proc test095 { method {tnum "095"} args } {
+ source ./include.tcl
+ global is_je_test
+ global is_qnx_test
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set basename $testdir/test$tnum
+ set env NULL
+		# No env is in use, so give the db a large private cache;
+		# no reason to swap--this isn't an mpool test.
+ set carg { -cachesize {0 25000000 0} }
+ } else {
+ set basename test$tnum
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ puts "Skipping for environment with txns"
+ return
+ }
+ set testdir [get_home $env]
+ set carg {}
+ }
+ cleanup $testdir $env
+
+ puts "Test$tnum: $method ($args) Bulk get test"
+
+ # Tcl leaves a lot of memory allocated after this test
+ # is run in the tclsh. This ends up being a problem on
+ # QNX runs as later tests then run out of memory.
+ if { $is_qnx_test } {
+ puts "Test$tnum skipping for QNX"
+ return
+ }
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test$tnum skipping for method $method"
+ return
+ }
+
+ # The test's success is dependent on the relationship between
+ # the amount of data loaded and the buffer sizes we pick, so
+ # these parameters don't belong on the command line.
+ set nsets 300
+ set noverflows 25
+
+ # We run the meat of the test twice: once with unsorted dups,
+ # once with sorted dups.
+ foreach { dflag sort } { -dup unsorted {-dup -dupsort} sorted } {
+ if { $is_je_test || [is_compressed $args] } {
+ if { $sort == "unsorted" } {
+ continue
+ }
+ }
+
+ set testfile $basename-$sort.db
+ set did [open $dict]
+
+ # Open and populate the database with $nsets sets of dups.
+ # Each set contains as many dups as its number
+ puts "\tTest$tnum.a:\
+ Creating database with $nsets sets of $sort dups."
+ set dargs "$dflag $carg $args"
+ set db [eval {berkdb_open_noerr -create} \
+ $omethod $dargs $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ t95_populate $db $did $nsets 0
+
+ # Determine the pagesize so we can use it to size the buffer.
+ set stat [$db stat]
+ set pagesize [get_pagesize $stat]
+
+ # Run basic get tests.
+ #
+ # A small buffer will fail if it is smaller than the pagesize.
+ # Skip small buffer tests if the page size is so small that
+ # we can't define a buffer smaller than the page size.
+ # (Buffers must be 1024 or multiples of 1024.)
+ #
+ # A big buffer of 66560 (64K + 1K) should always be large
+ # enough to contain the data, so the test should succeed
+ # on all platforms. We picked this number because it
+ # is larger than the largest allowed pagesize, so the test
+ # always fills more than a page at some point.
+
+ set maxpage [expr 1024 * 64]
+ set bigbuf [expr $maxpage + 1024]
+ set smallbuf 1024
+
+ if { $pagesize > 1024 } {
+ t95_gettest $db $tnum b $smallbuf 1
+ } else {
+ puts "Skipping small buffer test Test$tnum.b"
+ }
+ t95_gettest $db $tnum c $bigbuf 0
+
+ # Run cursor get tests.
+ if { $pagesize > 1024 } {
+			t95_cgettest $db $tnum d $smallbuf 1
+ } else {
+ puts "Skipping small buffer test Test$tnum.d"
+ }
+ t95_cgettest $db $tnum e $bigbuf 0
+
+ # Run invalid flag combination tests
+ # Sync and reopen test file so errors won't be sent to stderr
+ error_check_good db_sync [$db sync] 0
+ set noerrdb [eval berkdb_open_noerr $dargs $testfile]
+ t95_flagtest $noerrdb $tnum f [expr 8192]
+ t95_cflagtest $noerrdb $tnum g [expr 100]
+ error_check_good noerrdb_close [$noerrdb close] 0
+
+ # Set up for overflow tests
+ set max [expr 4096 * $noverflows]
+ puts "\tTest$tnum.h: Add $noverflows overflow sets\
+ to database (max item size $max)"
+ t95_populate $db $did $noverflows 4096
+
+ # Run overflow get tests. The overflow test fails with
+ # our standard big buffer doubled, but succeeds with a
+ # buffer sized to handle $noverflows pairs of data of
+ # size $max.
+ t95_gettest $db $tnum i $bigbuf 1
+ t95_gettest $db $tnum j [expr $bigbuf * 2] 1
+ t95_gettest $db $tnum k [expr $max * $noverflows * 2] 0
+
+ # Run overflow cursor get tests.
+ t95_cgettest $db $tnum l $bigbuf 1
+ # Expand buffer to accommodate basekey as well as the padding.
+ t95_cgettest $db $tnum m [expr ($max + 512) * 2] 0
+
+ error_check_good db_close [$db close] 0
+ close $did
+ }
+}
+
+proc t95_gettest { db tnum letter bufsize expectfail } {
+ t95_gettest_body $db $tnum $letter $bufsize $expectfail 0
+}
+proc t95_cgettest { db tnum letter bufsize expectfail } {
+ t95_gettest_body $db $tnum $letter $bufsize $expectfail 1
+}
+proc t95_flagtest { db tnum letter bufsize } {
+ t95_flagtest_body $db $tnum $letter $bufsize 0
+}
+proc t95_cflagtest { db tnum letter bufsize } {
+ t95_flagtest_body $db $tnum $letter $bufsize 1
+}
+
+# Basic get test
+proc t95_gettest_body { db tnum letter bufsize expectfail usecursor } {
+ global errorCode
+
+ foreach flag { multi multi_key } {
+ if { $usecursor == 0 } {
+ if { $flag == "multi_key" } {
+ # db->get does not allow multi_key
+ continue
+ } else {
+ set action "db get -$flag"
+ }
+ } else {
+ set action "dbc get -$flag -set/-next"
+ }
+ puts "\tTest$tnum.$letter: $action with bufsize $bufsize"
+ set allpassed TRUE
+ set saved_err ""
+
+ # Cursor for $usecursor.
+ if { $usecursor != 0 } {
+ set getcurs [$db cursor]
+ error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
+ }
+
+ # Traverse DB with cursor; do get/c_get($flag) on each item.
+ set dbc [$db cursor]
+ error_check_good is_valid_dbc [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first] } { [llength $dbt] != 0 } \
+ { set dbt [$dbc get -nextnodup] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+
+ if { $usecursor == 0 } {
+ set ret [catch {eval $db get -$flag $bufsize $key} res]
+ } else {
+ set res {}
+ for { set ret [catch {eval $getcurs get -$flag $bufsize\
+ -set $key} tres] } \
+ { $ret == 0 && [llength $tres] != 0 } \
+ { set ret [catch {eval $getcurs get -$flag $bufsize\
+ -nextdup} tres]} {
+ eval lappend res $tres
+ }
+ }
+
+ # If we expect a failure, be more tolerant if the above
+ # fails; just make sure it's a DB_BUFFER_SMALL or an
+ # EINVAL (if the buffer is smaller than the pagesize,
+ # it's EINVAL), mark it, and move along.
+ if { $expectfail != 0 && $ret != 0 } {
+ if { [is_substr $errorCode DB_BUFFER_SMALL] != 1 && \
+ [is_substr $errorCode EINVAL] != 1 } {
+ error_check_good \
+ "$flag failure errcode" \
+ $errorCode "DB_BUFFER_SMALL or EINVAL"
+ }
+ set allpassed FALSE
+ continue
+ }
+ error_check_good "get_$flag ($key)" $ret 0
+ if { $flag == "multi_key" } {
+ t95_verify $res TRUE
+ } else {
+ t95_verify $res FALSE
+ }
+ }
+ set ret [catch {eval $db get -$flag $bufsize} res]
+
+ if { $expectfail == 1 } {
+ error_check_good allpassed $allpassed FALSE
+ puts "\t\tTest$tnum.$letter:\
+ returned at least one DB_BUFFER_SMALL (as expected)"
+ } else {
+ error_check_good allpassed $allpassed TRUE
+ puts "\t\tTest$tnum.$letter: succeeded (as expected)"
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $usecursor != 0 } {
+ error_check_good getcurs_close [$getcurs close] 0
+ }
+ }
+}
+
+# Test of invalid flag combinations
+proc t95_flagtest_body { db tnum letter bufsize usecursor } {
+ global errorCode
+
+ foreach flag { multi multi_key } {
+ if { $usecursor == 0 } {
+ if { $flag == "multi_key" } {
+ # db->get does not allow multi_key
+ continue
+ } else {
+ set action "db get -$flag"
+ }
+ } else {
+ set action "dbc get -$flag"
+ }
+ puts "\tTest$tnum.$letter: $action with invalid flag combinations"
+
+ # Cursor for $usecursor.
+ if { $usecursor != 0 } {
+ set getcurs [$db cursor]
+ error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
+ }
+
+ if { $usecursor == 0 } {
+ # Disallowed flags for db->get
+ set badflags [list consume consume_wait {rmw some_key}]
+
+ foreach badflag $badflags {
+ catch {eval $db get -$flag $bufsize -$badflag} ret
+ error_check_good \
+ db:get:$flag:$badflag [is_substr $errorCode EINVAL] 1
+ }
+ } else {
+ # Disallowed flags for db->cget
+ set cbadflags [list last get_recno join_item \
+ {multi_key 1000} prev prevnodup]
+
+ set dbc [$db cursor]
+ $dbc get -first
+ foreach badflag $cbadflags {
+ catch {eval $dbc get -$flag $bufsize -$badflag} ret
+ error_check_good dbc:get:$flag:$badflag \
+ [is_substr $errorCode EINVAL] 1
+ }
+ error_check_good dbc_close [$dbc close] 0
+ }
+ if { $usecursor != 0 } {
+ error_check_good getcurs_close [$getcurs close] 0
+ }
+ }
+ puts "\t\tTest$tnum.$letter completed"
+}
+
+# Verify that a passed-in list of key/data pairs all match the predicted
+# structure (e.g. {{thing1 thing1.0}}, {{key2 key2.0} {key2 key2.1}}).
+proc t95_verify { res multiple_keys } {
+ global alphabet
+
+ set i 0
+ set orig_key [lindex [lindex $res 0] 0]
+ set nkeys [string trim $orig_key $alphabet']
+ set base_key [string trim $orig_key 0123456789]
+ set datum_count 0
+
+ while { 1 } {
+ set key [lindex [lindex $res $i] 0]
+ set datum [lindex [lindex $res $i] 1]
+
+ if { $datum_count >= $nkeys } {
+ if { [llength $key] != 0 } {
+ # If there are keys beyond $nkeys, we'd
+ # better have multiple_keys set.
+ error_check_bad "keys beyond number $i allowed"\
+ $multiple_keys FALSE
+
+ # If multiple_keys is set, accept the new key.
+ set orig_key $key
+ set nkeys [eval string trim \
+ $orig_key {$alphabet'}]
+ set base_key [eval string trim \
+ $orig_key 0123456789]
+ set datum_count 0
+ } else {
+ # datum_count has hit nkeys. We're done.
+ return
+ }
+ }
+
+ error_check_good returned_key($i) $key $orig_key
+ error_check_good returned_datum($i) \
+ $datum $base_key.[format %4u $datum_count]
+ incr datum_count
+ incr i
+ }
+}
+
+# Add nsets dup sets, each consisting of {word$ndups word$n} pairs,
+# with "word" having (i * pad_bytes) bytes extra padding.
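+# For example (illustrative), with pad_bytes 0 and dictionary words
+# "apple" and "boy": set 1 gets key "apple1" with datum "apple.   0",
+# and set 2 gets key "boy2" with data "boy.   0" and "boy.   1"
+# ([format %4u $j] pads the dup counter to four characters).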
+proc t95_populate { db did nsets pad_bytes } {
+ set txn ""
+ for { set i 1 } { $i <= $nsets } { incr i } {
+ # basekey is a padded dictionary word
+ gets $did basekey
+
+ append basekey [repeat "a" [expr $pad_bytes * $i]]
+
+ # key is basekey with the number of dups stuck on.
+ set key $basekey$i
+
+ for { set j 0 } { $j < $i } { incr j } {
+ set data $basekey.[format %4u $j]
+ error_check_good db_put($key,$data) \
+ [eval {$db put} $txn {$key $data}] 0
+ }
+ }
+
+ # This will make debugging easier, and since the database is
+ # read-only from here out, it's cheap.
+ error_check_good db_sync [$db sync] 0
+}
diff --git a/db-4.8.30/test/test096.tcl b/db-4.8.30/test/test096.tcl
new file mode 100644
index 0000000..e536853
--- /dev/null
+++ b/db-4.8.30/test/test096.tcl
@@ -0,0 +1,393 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test096
+# TEST Db->truncate test.
+# TEST For all methods:
+# TEST Test that truncate empties an existing database.
+# TEST Test that truncate-write in an aborted txn doesn't
+# TEST change the original contents.
+# TEST Test that truncate-write in a committed txn does
+# TEST overwrite the original contents.
+# TEST For btree and hash, do the same in a database with offpage dups.
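+#
+# The truncate-then-write pattern exercised below boils down to the
+# following (a sketch only; txn_truncate at the end of this file is the
+# code actually used):
+#
+#	set txn [$env txn]
+#	set count [$db truncate -txn $txn]	;# number of records dropped
+#	$db put -txn $txn $key $data
+#	$txn abort				;# or: $txn commit
+#
+# After an abort the original records are all still present; after a
+# commit only the writes made inside the transaction remain.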
+proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} {
+ global fixed_len
+ global alphabet
+ source ./include.tcl
+
+ set orig_tdir $testdir
+ set orig_fixed_len $fixed_len
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ if { [is_partitioned $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+
+ puts "Test096: $method db truncate method test"
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test096: Skipping for specific pagesizes"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ set testfile test096.db
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 0 } {
+ puts "Environment w/o txns specified; skipping."
+ return
+ }
+ if { $nentries == 1000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ set testdir [get_home $env]
+ set closeenv 0
+ } else {
+ env_cleanup $testdir
+
+ # We need an env for exclusive-use testing. Since we are
+ # using txns, we need at least 1 lock per record for queue.
+ set lockmax [expr $nentries * 3]
+ set env [eval {berkdb_env -create -home $testdir \
+ -lock_max_locks $lockmax -lock_max_objects $lockmax \
+ -pagesize $pagesize -txn} $encargs]
+ error_check_good env_create [is_valid_env $env] TRUE
+ set closeenv 1
+ }
+
+ set t1 $testdir/t1
+
+ puts "\tTest096.a: Create database with $nentries entries"
+ set db [eval {berkdb_open -create -auto_commit \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ t96_populate $db $omethod $env $nentries
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.b: Truncate database"
+ set dbtr [eval {berkdb_open -create -auto_commit \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+
+ set ret [$dbtr truncate]
+ error_check_good dbtrunc $ret $nentries
+ error_check_good db_close [$dbtr close] 0
+
+ set db [eval {berkdb_open -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set number [number_of_entries $db $method]
+ error_check_good number_of_entries $number 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbverify [verify_dir $testdir "\tTest096.c: " 0 0 $nodump] 0
+
+ # Remove and recreate database.
+ puts "\tTest096.d: Recreate database with $nentries entries"
+ set db [eval {berkdb_open -create -auto_commit \
+ -env $env $omethod -mode 0644} $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ t96_populate $db $omethod $env $nentries
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.e: Truncate and write in a txn, then abort"
+ txn_truncate $env $omethod $args $testfile $nentries abort 1
+
+ set db [eval {berkdb_open -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Database should have original contents since both the truncate
+ # and the write were aborted
+ set number [number_of_entries $db $method]
+ error_check_good number_of_entries $number $nentries
+ error_check_good dbclose [$db close] 0
+
+ error_check_good dbverify [verify_dir $testdir "\tTest096.f: " 0 0 $nodump] 0
+
+ puts "\tTest096.g: Truncate and write in a txn, then commit"
+ txn_truncate $env $omethod $args $testfile $nentries commit 1
+
+ set db [eval {berkdb_open -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Database should contain only the new items
+ set number [number_of_entries $db $method]
+ error_check_good number_of_entries $number [expr $nentries / 2]
+ error_check_good dbclose [$db close] 0
+ error_check_good dbverify [verify_dir $testdir "\tTest096.h: " 0 0 $nodump] 0
+
+ puts "\tTest096.i: Check proper handling of overflow pages."
+ # Large keys and data compared to page size guarantee
+ # overflow pages.
+ if { [is_fixed_length $method] == 1 } {
+ puts "Skipping overflow test for fixed-length method."
+ } else {
+ set overflowfile overflow096.db
+ set data [repeat $alphabet 600]
+ set db [eval {berkdb_open -create -auto_commit -pagesize 512 \
+ -env $env $omethod -mode 0644} $args $overflowfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set noverflows 100
+ for { set i 1 } { $i <= $noverflows } { incr i } {
+ set ret [eval {$db put} \
+ $i [chop_data $method "$i$data"]]
+ }
+
+ # Hash reports pages of type P_OVERFLOW as "big pages", other
+ # access methods as "overflow pages".
+ if { [is_hash $method] == 1 } {
+ set bigpages [stat_field $db stat "Number of big pages"]
+ error_check_good stat:bigpages [expr $bigpages > 0] 1
+ } else {
+ set overflow [stat_field $db stat "Overflow pages"]
+ error_check_good stat:overflow [expr $overflow > 0] 1
+ }
+
+ error_check_good overflow_truncate [$db truncate] $noverflows
+ error_check_good overflow_close [$db close] 0
+ }
+
+ # Remove database and create a new one with unsorted dups.
+ # Skip the rest of the test for methods not supporting dups
+ # and for compression, which does not support unsorted dups.
+ #
+ if { [is_record_based $method] == 1 || \
+ [is_compressed $args] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Skipping remainder of test096."
+ if { $closeenv == 1 } {
+ error_check_good envclose [$env close] 0
+ }
+ return
+ }
+ set ret [berkdb dbremove -env $env -auto_commit $testfile]
+ set ret [berkdb dbremove -env $env -auto_commit $overflowfile]
+
+ puts "\tTest096.j: Create $nentries entries with $ndups duplicates"
+ set db [eval {berkdb_open -pagesize $pagesize -dup -auto_commit \
+ -create -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ t96_populate $db $omethod $env $nentries $ndups
+
+ set dlist ""
+ for { set i 1 } {$i <= $ndups} {incr i} {
+ lappend dlist $i
+ }
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ dup_check $db $txn $t1 $dlist
+ error_check_good txn [$t commit] 0
+
+ # Make sure there are duplicates.
+ puts "\tTest096.k: Verify off page duplicates status"
+ set duplicate [stat_field $db stat "Duplicate pages"]
+ error_check_good stat:offpage_dups [expr $duplicate > 0] 1
+
+ set recs [expr $ndups * $nentries]
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.l: Truncate database in a txn then abort"
+ txn_truncate $env $omethod $args $testfile $recs abort
+
+ set db [eval {berkdb_open -auto_commit -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set number [number_of_entries $db $method]
+ error_check_good number_of_entries $number $recs
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.m: Truncate database in a txn then commit"
+ txn_truncate $env $omethod $args $testfile $recs commit
+
+ set db [eval {berkdb_open -auto_commit -env $env} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set number [number_of_entries $db $method]
+ error_check_good number_of_entries $number 0
+ error_check_good dbclose [$db close] 0
+
+ set testdir [get_home $env]
+ error_check_good dbverify \
+ [verify_dir $testdir "\tTest096.n: " 0 0 $nodump] 0
+
+ # Remove database, and create a new one with dups. Test
+ # truncate + write within a transaction.
+ puts "\tTest096.o: Create $nentries entries with $ndups duplicates"
+ set ret [berkdb dbremove -env $env -auto_commit $testfile]
+ set db [eval {berkdb_open -pagesize $pagesize -dup -auto_commit \
+ -create -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ t96_populate $db $omethod $env $nentries $ndups
+
+ set dlist ""
+ for { set i 1 } {$i <= $ndups} {incr i} {
+ lappend dlist $i
+ }
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ dup_check $db $txn $t1 $dlist
+ error_check_good txn [$t commit] 0
+
+ puts "\tTest096.p: Verify off page duplicates status"
+ set duplicate [stat_field $db stat "Duplicate pages"]
+ error_check_good stat:offpage [expr $duplicate > 0] 1
+
+ set recs [expr $ndups * $nentries]
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.q: Truncate and write in a txn, then abort"
+ txn_truncate $env $omethod $args $testfile $recs abort 1
+
+ set db [eval {berkdb_open -auto_commit -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set number [number_of_entries $db $method]
+ error_check_good number_of_entries $number $recs
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.r: Truncate and write in a txn, then commit"
+ txn_truncate $env $omethod $args $testfile $recs commit 1
+
+ set db [eval {berkdb_open -auto_commit -env $env} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set number [number_of_entries $db $method]
+ error_check_good number_of_entries $number [expr $recs / 2]
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.s: Check overflow pages with dups."
+ set ndups 3
+ set db [eval {berkdb_open -create -auto_commit -pagesize 512 \
+ -env $env $omethod -dup -mode 0644} $args $overflowfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ for { set i 1 } { $i <= $noverflows } { incr i } {
+ for { set j 0 } { $j < $ndups } { incr j } {
+ set ret [eval {$db put} \
+ $i [chop_data $method "$i.$j$data"]]
+ }
+ }
+
+ # Hash reports pages of type P_OVERFLOW as "big pages", other
+ # access methods as "overflow pages".
+ if { [is_hash $method] == 1 } {
+ set bigpages [stat_field $db stat "Number of big pages"]
+ error_check_good stat:bigpages [expr $bigpages > 0] 1
+ } else {
+ set overflow [stat_field $db stat "Overflow pages"]
+ error_check_good stat:overflow [expr $overflow > 0] 1
+ }
+
+ set nentries [expr $noverflows * $ndups]
+ error_check_good overflow_truncate [$db truncate] $nentries
+ error_check_good overflow_close [$db close] 0
+
+ set testdir [get_home $env]
+ error_check_good dbverify [verify_dir $testdir "\tTest096.t: " 0 0 $nodump] 0
+
+ if { $closeenv == 1 } {
+ error_check_good envclose [$env close] 0
+ }
+ set testdir $orig_tdir
+}
+
+proc t96_populate {db method env nentries {ndups 1}} {
+ source ./include.tcl
+
+ set did [open $dict]
+ set count 0
+ set txn ""
+ set pflags ""
+ set gflags ""
+
+ if { [is_record_based $method] == 1 } {
+ append gflags "-recno"
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $ndups > 1 } {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+ }
+ } else {
+ set datastr [reverse $str]
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+ }
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good $key:dbget [llength $ret] $ndups
+ incr count
+ }
+ close $did
+}
+
+proc number_of_entries { db method } {
+ if { [is_record_based $method] == 1 } {
+ set dbc [$db cursor]
+ set last [$dbc get -last]
+ if {[llength $last] == 0} {
+ set number 0
+ } else {
+ set number [lindex [lindex $last 0] 0]
+ }
+ } else {
+ set ret [$db get -glob *]
+ set number [llength $ret]
+ }
+ return $number
+}
+
+# Open database. Truncate in a transaction, optionally with a write
+# included in the transaction as well, then abort or commit. Close database.
+
+proc txn_truncate { env method args testfile nentries op {write 0}} {
+ set db [eval {berkdb_open -create -auto_commit \
+ -env $env $method -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set txn [$env txn]
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+
+ set ret [$db truncate -txn $txn]
+ error_check_good dbtrunc $ret $nentries
+ if { $write == 1 } {
+ for {set i 1} {$i <= [expr $nentries / 2]} {incr i} {
+ set ret [eval {$db put} -txn $txn \
+ {$i [chop_data $method "aaaaaaaaaa"]}]
+ error_check_good write $ret 0
+ }
+ }
+
+ error_check_good txn$op [$txn $op] 0
+ error_check_good db_close [$db close] 0
+}
+
diff --git a/db-4.8.30/test/test097.tcl b/db-4.8.30/test/test097.tcl
new file mode 100644
index 0000000..c2c847a
--- /dev/null
+++ b/db-4.8.30/test/test097.tcl
@@ -0,0 +1,192 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test097
+# TEST Open up a large set of database files simultaneously.
+# TEST Adjust for local file descriptor resource limits.
+# TEST Then use the first 1000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, retrieve all; compare output
+# TEST to original.
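+#
+# The open loop detects the file descriptor limit by catching the failed
+# open and inspecting errorCode, roughly (a sketch only; test097_open
+# below is the code actually used):
+#
+#	if { [catch {berkdb_open -env $env -create $omethod $name} db] } {
+#		set hit_limit [expr [is_substr $errorCode EMFILE] || \
+#		    [is_substr $errorCode ENFILE]]
+#	}
+#
+# On hitting the limit the test backs off, closes the extra handles, and
+# runs with however many databases it could open.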
+
+proc test097 { method {ndbs 500} {nentries 400} args } {
+ global pad_datastr
+ source ./include.tcl
+
+ set largs [convert_args $method $args]
+ set encargs ""
+ set largs [split_encargs $largs encargs]
+
+ # Open an environment, with a 1MB cache.
+ set eindex [lsearch -exact $largs "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $largs $eindex]
+ puts "Test097: $method: skipping for env $env"
+ return
+ }
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -log_regionmax 131072 \
+ -pagesize 512 -cachesize { 0 1048576 1 } -txn} \
+ -home $testdir $encargs]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ if { [is_partitioned $args] == 1 } {
+ set ndbs [expr $ndbs / 10]
+ }
+
+ # Create the database and open the dictionary
+ set basename test097
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ #
+ # When running with HAVE_MUTEX_SYSTEM_RESOURCES,
+ # we can run out of mutex lock slots due to the nature of this test.
+ # So, for this test, increase the number of pages per extent
+ # to consume fewer resources.
+ #
+ if { [is_queueext $method] } {
+ set numdb [expr $ndbs / 4]
+ set eindex [lsearch -exact $largs "-extent"]
+ error_check_bad extent $eindex -1
+ incr eindex
+ set extval [lindex $largs $eindex]
+ set extval [expr $extval * 4]
+ set largs [lreplace $largs $eindex $eindex $extval]
+ }
+ puts -nonewline "Test097: $method ($largs) "
+ puts "$nentries entries in at most $ndbs simultaneous databases"
+
+ puts "\tTest097.a: Simultaneous open"
+ set numdb [test097_open tdb $ndbs $method $env $basename $largs]
+ if { $numdb == 0 } {
+ puts "\tTest097: Insufficient resources available -- skipping."
+ error_check_good envclose [$env close] 0
+ return
+ }
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ if { [is_record_based $method] == 1 } {
+ append gflags "-recno"
+ }
+ puts "\tTest097.b: put/get on $numdb databases"
+ set datastr "abcdefghij"
+ set pad_datastr [pad_data $method $datastr]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ for { set i 1 } { $i <= $numdb } { incr i } {
+ set ret [eval {$tdb($i) put} $txn $pflags \
+ {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ set ret [eval {$tdb($i) get} $gflags {$key}]
+ error_check_good get $ret [list [list $key \
+ [pad_data $method $datastr]]]
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest097.c: dump and check files"
+ for { set j 1 } { $j <= $numdb } { incr j } {
+ dump_file $tdb($j) $txn $t1 test097.check
+ error_check_good db_close [$tdb($j) close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+			for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ }
+ filesort $t1 $t3
+
+ error_check_good Test097:diff($t3,$t2) [filecmp $t3 $t2] 0
+ }
+ error_check_good envclose [$env close] 0
+}
+
+# Check function for test097; all data should match the fixed datastr
+proc test097.check { key data } {
+ global pad_datastr
+ error_check_good "data mismatch for key $key" $data $pad_datastr
+}
+
+proc test097_open { tdb ndbs method env basename largs } {
+ global errorCode
+ upvar $tdb db
+
+ set j 0
+ set numdb $ndbs
+ if { [is_queueext $method] } {
+ set numdb [expr $ndbs / 4]
+ }
+ set omethod [convert_method $method]
+ for { set i 1 } {$i <= $numdb } { incr i } {
+ set stat [catch {eval {berkdb_open -env $env \
+ -pagesize 512 -create -mode 0644} \
+ $largs {$omethod $basename.$i.db}} db($i)]
+ #
+ # Check if we've reached our limit
+ #
+ if { $stat == 1 } {
+ set min 20
+ set em [is_substr $errorCode EMFILE]
+ set en [is_substr $errorCode ENFILE]
+ error_check_good open_ret [expr $em || $en] 1
+ puts \
+ "\tTest097.a.1 Encountered resource limits opening $i files, adjusting"
+ if { [is_queueext $method] } {
+ set end [expr $j / 4]
+ set min 10
+ } else {
+ set end [expr $j - 10]
+ }
+ #
+ # If we cannot open even $min files, then this test is
+ # not very useful. Close up shop and go back.
+ #
+ if { $end < $min } {
+ test097_close db 1 $j
+ return 0
+ }
+ test097_close db [expr $end + 1] $j
+ return $end
+ } else {
+ error_check_good dbopen [is_valid_db $db($i)] TRUE
+ set j $i
+ }
+ }
+ return $j
+}
+
+proc test097_close { tdb start end } {
+ upvar $tdb db
+
+ for { set i $start } { $i <= $end } { incr i } {
+ error_check_good db($i)close [$db($i) close] 0
+ }
+}
diff --git a/db-4.8.30/test/test098.tcl b/db-4.8.30/test/test098.tcl
new file mode 100644
index 0000000..8d78ed5
--- /dev/null
+++ b/db-4.8.30/test/test098.tcl
@@ -0,0 +1,90 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test098
+# TEST Test of DB_GET_RECNO and secondary indices. Open a primary and
+# TEST a secondary, and do a normal cursor get followed by a get_recno.
+# TEST (This is a smoke test for "Bug #1" in [#5811].)
+
+proc test098 { method args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test098: $omethod ($args): DB_GET_RECNO and secondary indices."
+
+ if { [is_rbtree $method] != 1 } {
+ puts "\tTest098: Skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ set txn ""
+ set auto ""
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set base $testdir/test098
+ set env NULL
+ } else {
+ set base test098
+ incr eindex
+ set env [lindex $args $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test098: Skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ set auto " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest098.a: Set up databases."
+
+ set adb [eval {berkdb_open} $omethod $args $auto \
+ {-create} $base-primary.db]
+ error_check_good adb_create [is_valid_db $adb] TRUE
+
+ set bdb [eval {berkdb_open} $omethod $args $auto \
+ {-create} $base-secondary.db]
+ error_check_good bdb_create [is_valid_db $bdb] TRUE
+
+ set ret [eval $adb associate $auto [callback_n 0] $bdb]
+ error_check_good associate $ret 0
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$adb put} $txn aaa data1]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set bc [$bdb cursor]
+ error_check_good cursor [is_valid_cursor $bc $bdb] TRUE
+
+ puts "\tTest098.b: c_get(DB_FIRST) on the secondary."
+ error_check_good get_first [$bc get -first] \
+ [list [list [[callback_n 0] aaa data1] data1]]
+
+ puts "\tTest098.c: c_get(DB_GET_RECNO) on the secondary."
+ error_check_good get_recno [$bc get -get_recno] 1
+
+ error_check_good c_close [$bc close] 0
+
+ error_check_good bdb_close [$bdb close] 0
+ error_check_good adb_close [$adb close] 0
+}
diff --git a/db-4.8.30/test/test099.tcl b/db-4.8.30/test/test099.tcl
new file mode 100644
index 0000000..c286c2b
--- /dev/null
+++ b/db-4.8.30/test/test099.tcl
@@ -0,0 +1,275 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test099
+# TEST
+# TEST Test of DB->get and DBC->c_get with set_recno and get_recno.
+# TEST
+# TEST Populate a small btree -recnum database.
+# TEST After all are entered, retrieve each using -recno with DB->get.
+# TEST Open a cursor and do the same for DBC->c_get with set_recno.
+# TEST Verify that set_recno sets the record number position properly.
+# TEST Verify that get_recno returns the correct record numbers.
+# TEST
+# TEST Using the same database, open 3 cursors and position one at
+# TEST the beginning, one in the middle, and one at the end. Delete
+# TEST by cursor and check that record renumbering is done properly.
+#
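+# For a btree opened with -recnum, record numbers are logical positions
+# in key order, so for example (a sketch only):
+#
+#	set pair [$dbc get -set_recno 5]	;# key/data pair at record 5
+#	set r [$dbc get -get_recno]		;# returns 5
+#
+# and deleting an earlier record shifts every later record number down
+# by one, which the three-cursor delete loop below verifies.
+#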
+proc test099 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test099: Test of set_recno and get_recno in DBC->c_get."
+ if { [is_rbtree $method] != 1 } {
+ puts "Test099: skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test099.db
+ set env NULL
+ } else {
+ set testfile test099.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ # Create the database and open the dictionary
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 1
+
+ append gflags " -recno"
+
+ puts "\tTest099.a: put loop"
+ # Here is the loop where we put each key/data pair
+ while { [gets $did str] != -1 && $count <= $nentries } {
+ set key $str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good db_put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ puts "\tTest099.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test099.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest099.c: Test set_recno then get_recno"
+ set db [eval {berkdb_open -rdonly} $args $omethod $testfile ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set did [open $t1]
+ set recno 1
+
+ # Create key(recno) array to use for later comparison
+ while { [gets $did str] != -1 } {
+ set kvals($recno) $str
+ incr recno
+ }
+
+ set recno 1
+ set ret [$dbc get -first]
+ error_check_bad dbc_get_first [llength $ret] 0
+
+ # First walk forward through the database ....
+ while { $recno < $count } {
+ # Test set_recno: verify it sets the record number properly.
+ set current [$dbc get -current]
+ set r [$dbc get -set_recno $recno]
+ error_check_good set_recno $current $r
+ # Test set_recno: verify that we find the expected key
+ # at the current record number position.
+ set k [lindex [lindex $r 0] 0]
+ error_check_good set_recno $kvals($recno) $k
+
+ # Test get_recno: verify that the return from
+ # get_recno matches the record number just set.
+ set g [$dbc get -get_recno]
+ error_check_good get_recno $recno $g
+ set ret [$dbc get -next]
+ incr recno
+ }
+
+ # ... and then backward.
+ set recno [expr $count - 1]
+ while { $recno > 0 } {
+ # Test set_recno: verify that we find the expected key
+ # at the current record number position.
+ set r [$dbc get -set_recno $recno]
+ set k [lindex [lindex $r 0] 0]
+ error_check_good set_recno $kvals($recno) $k
+
+ # Test get_recno: verify that the return from
+ # get_recno matches the record number just set.
+ set g [$dbc get -get_recno]
+ error_check_good get_recno $recno $g
+ set recno [expr $recno - 1]
+ }
+
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+
+ puts "\tTest099.d: Test record renumbering with cursor deletes."
+ # Reopen the database, this time with write permission.
+ set db [eval {berkdb_open} $args $omethod $testfile ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Open three cursors.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc0 [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc0 $db] TRUE
+ set dbc1 [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc1 $db] TRUE
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
+
+ # Initialize cursor positions. Set dbc0 at the beginning,
+ # dbc1 at the middle, and dbc2 at the end.
+ set ret [$dbc0 get -first]
+ error_check_bad dbc0_get_first [llength $ret] 0
+
+ set middle [expr $nentries / 2 + 1]
+ set ret [$dbc1 get -set_recno $middle]
+ error_check_bad dbc1_get_middle [llength $ret] 0
+
+ set ret [$dbc2 get -last]
+ error_check_bad dbc2_get_last [llength $ret] 0
+
+ # At each iteration, delete the first entry, delete the middle
+ # entry, and check the record number for beginning, middle and end.
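+	# For example, with nentries = 100, on the first pass: after
+	# deleting the old first record the new first record is recno 1,
+	# the middle cursor has shifted to recno 100/2 - 1 + 1 = 50, and
+	# after the middle delete the last record sits at recno 100 - 2 = 98.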
+ set count 1
+ while { $count <= [expr $nentries / 2] } {
+ # Delete first item.
+ error_check_good dbc0_del [$dbc0 del] 0
+
+		# For non-txn envs, check that db_stat recalculates to adjust
+		# for items marked for deletion.  We can't do this in txn envs
+		# because the live txn would cause a deadlock.
+ if { $txnenv == 0 } {
+ set nkeys [expr $nentries - [expr $count * 2] + 1]
+ set stat [$db stat]
+ error_check_good keys_after_delete [is_substr $stat \
+ "{Number of keys} $nkeys"] 1
+ error_check_good records_after_delete [is_substr $stat \
+ "{Number of records} $nkeys"] 1
+
+ # Now delete the same entry again (which should not
+ # change the database) and make sure db->stat returns
+ # the same number of keys and records as before.
+			catch {$dbc del} result
+
+ set stat [$db stat]
+ error_check_good keys_after_baddelete [is_substr $stat \
+ "{Number of keys} $nkeys"] 1
+ error_check_good recs_after_baddelete [is_substr $stat \
+ "{Number of records} $nkeys"] 1
+ }
+
+ # Reposition cursor to new first item, check that record number
+ # is 1.
+ set ret0 [$dbc0 get -next]
+ error_check_good beginning_recno [$dbc0 get -get_recno] 1
+
+ # Calculate the current middle recno and compare to actual.
+ set middle [$dbc1 get -get_recno]
+ set calcmiddle [expr [expr $nentries / 2] - $count + 1]
+ error_check_good middle_recno $middle $calcmiddle
+
+ # Delete middle item, reposition cursor to next item.
+ error_check_good dbc1_del [$dbc1 del] 0
+ set ret1 [$dbc1 get -next]
+
+ # Calculate the expected end recno and compare to actual.
+ set end [$dbc2 get -get_recno]
+ set calcend [expr $nentries - [expr $count * 2]]
+ # On the last iteration, all items have been deleted so
+ # there is no recno.
+ if { $calcend == 0 } {
+ error_check_good end_recno $end ""
+ } else {
+ error_check_good end_recno $end $calcend
+ }
+ incr count
+ }
+
+ # Close all three cursors.
+ error_check_good cursor_close [$dbc0 close] 0
+ error_check_good cursor_close [$dbc1 close] 0
+ error_check_good cursor_close [$dbc2 close] 0
+
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# Check function for dumped file; key and data should be identical.
+proc test099.check { key data } {
+ error_check_good "data mismatch for key $key" $key $data
+}
diff --git a/db-4.8.30/test/test100.tcl b/db-4.8.30/test/test100.tcl
new file mode 100644
index 0000000..2569694
--- /dev/null
+++ b/db-4.8.30/test/test100.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test100
+# TEST Test for functionality near the end of the queue
+# TEST using test025 (DB_APPEND).
+proc test100 { method {nentries 10000} {tnum "100"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test$tnum for $method."
+ return;
+ }
+ eval {test025 $method $nentries 4294967000 $tnum} $args
+}
diff --git a/db-4.8.30/test/test101.tcl b/db-4.8.30/test/test101.tcl
new file mode 100644
index 0000000..0e31ac6
--- /dev/null
+++ b/db-4.8.30/test/test101.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test101
+# TEST Test for functionality near the end of the queue
+# TEST using test070 (DB_CONSUME).
+proc test101 { method {nentries 1000} {txn -txn} {tnum "101"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test$tnum for $method."
+ return;
+ }
+ eval {test070 $method 4 2 $nentries WAIT 4294967000 $txn $tnum} $args
+}
diff --git a/db-4.8.30/test/test102.tcl b/db-4.8.30/test/test102.tcl
new file mode 100644
index 0000000..ff623d7
--- /dev/null
+++ b/db-4.8.30/test/test102.tcl
@@ -0,0 +1,234 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test102
+# TEST Bulk get test for record-based methods. [#2934]
+proc test102 { method {nsets 1000} {tnum "102"} args } {
+ source ./include.tcl
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_rbtree $method] == 1 || [is_record_based $method] == 0} {
+ puts "Test$tnum skipping for method $method"
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set basename $testdir/test$tnum
+ set env NULL
+		# Since we have our own cache here (no external env), make it
+		# large enough that there's no reason to swap -- this isn't
+		# an mpool test.
+ set carg { -cachesize {0 25000000 0} }
+ } else {
+ set basename test$tnum
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ puts "Skipping for environment with txns"
+ return
+ }
+ set testdir [get_home $env]
+ set carg {}
+ }
+ cleanup $testdir $env
+
+ puts "Test$tnum: $method ($args) Bulk get test"
+
+ # Open and populate the database.
+ puts "\tTest$tnum.a: Creating $method database\
+ with $nsets entries."
+ set dargs "$carg $args"
+ set testfile $basename.db
+ set db [eval {berkdb_open_noerr -create} $omethod $dargs $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ t102_populate $db $method $nsets $txnenv 0
+
+ # Determine the pagesize so we can use it to size the buffer.
+ set stat [$db stat]
+ set pagesize [get_pagesize $stat]
+
+ # Run get tests. The gettest should succeed as long as
+ # the buffer is at least as large as the page size. Test for
+ # failure of a small buffer unless the page size is so small
+ # we can't define a smaller buffer (buffers must be multiples
+ # of 1024). A "big buffer" should succeed in all cases because
+ # we define it to be larger than 65536, the largest page
+ # currently allowed.
+ set maxpage [expr 1024 * 64]
+ set bigbuf [expr $maxpage + 1024]
+ set smallbuf 1024
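+	# For reference, the bulk calls exercised below look roughly like
+	# this (a sketch; $key stands for any key in the database):
+	#	$db get -multi $bufsize $key
+	#	$dbc get -multi_key $bufsize -set $key
+	# where $bufsize must be a multiple of 1024 and at least as large
+	# as the page size for the call to succeed.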
+
+ # Run regular db->get tests.
+ if { $pagesize > 1024 } {
+ t102_gettest $db $tnum b $smallbuf 1
+ } else {
+ puts "Skipping Test$tnum.b for small pagesize."
+ }
+ t102_gettest $db $tnum c $bigbuf 0
+
+ # Run cursor get tests.
+ if { $pagesize > 1024 } {
+		t102_cgettest $db $tnum d $smallbuf 1
+ } else {
+		puts "Skipping Test$tnum.d for small pagesize."
+ }
+ t102_cgettest $db $tnum e $bigbuf 0
+
+ if { [is_fixed_length $method] == 1 } {
+ puts "Skipping overflow tests for fixed-length method $omethod."
+ } else {
+
+ # Set up for overflow tests
+ puts "\tTest$tnum.f: Growing database with overflow sets"
+ t102_populate $db $method [expr $nsets / 100] $txnenv 10000
+
+ # Run overflow get tests. Test should fail for overflow pages
+ # with our standard big buffer but succeed at twice that size.
+ t102_gettest $db $tnum g $bigbuf 1
+ t102_gettest $db $tnum h [expr $bigbuf * 2] 0
+
+ # Run overflow cursor get tests. Test will fail for overflow
+ # pages with 8K buffer but succeed with a large buffer.
+ t102_cgettest $db $tnum i 8192 1
+ t102_cgettest $db $tnum j $bigbuf 0
+ }
+ error_check_good db_close [$db close] 0
+}
+
+proc t102_gettest { db tnum letter bufsize expectfail } {
+ t102_gettest_body $db $tnum $letter $bufsize $expectfail 0
+}
+proc t102_cgettest { db tnum letter bufsize expectfail } {
+ t102_gettest_body $db $tnum $letter $bufsize $expectfail 1
+}
+
+# Basic get test
+proc t102_gettest_body { db tnum letter bufsize expectfail usecursor } {
+ global errorCode
+
+ foreach flag { multi multi_key } {
+ if { $usecursor == 0 } {
+ if { $flag == "multi_key" } {
+ # db->get does not allow multi_key
+ continue
+ } else {
+ set action "db get -$flag"
+ }
+ } else {
+ set action "dbc get -$flag -set/-next"
+ }
+ puts "\tTest$tnum.$letter: $action with bufsize $bufsize"
+
+ set allpassed TRUE
+ set saved_err ""
+
+ # Cursor for $usecursor.
+ if { $usecursor != 0 } {
+ set getcurs [$db cursor]
+ error_check_good \
+ getcurs [is_valid_cursor $getcurs $db] TRUE
+ }
+
+ # Traverse DB with cursor; do get/c_get($flag) on each item.
+ set dbc [$db cursor]
+ error_check_good is_valid_dbc [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first] } { [llength $dbt] != 0 } \
+ { set dbt [$dbc get -next] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+
+ if { $usecursor == 0 } {
+ set ret [catch \
+ {eval $db get -$flag $bufsize $key} res]
+ } else {
+ set res {}
+ for { set ret [catch {eval $getcurs get\
+ -$flag $bufsize -set $key} tres] } \
+ { $ret == 0 && [llength $tres] != 0 } \
+ { set ret [catch {eval $getcurs get\
+ -$flag $bufsize -next} tres]} {
+ eval lappend res $tres
+ }
+ }
+
+ # If we expect a failure, be more tolerant if the above
+ # fails; just make sure it's a DB_BUFFER_SMALL or an
+ # EINVAL (if the buffer is smaller than the pagesize,
+ # it's EINVAL), mark it, and move along.
+ if { $expectfail != 0 && $ret != 0 } {
+ if { [is_substr $errorCode DB_BUFFER_SMALL] != 1 && \
+ [is_substr $errorCode EINVAL] != 1 } {
+ error_check_good \
+ "$flag failure errcode" \
+ $errorCode "DB_BUFFER_SMALL or EINVAL"
+ }
+ set allpassed FALSE
+ continue
+ }
+ error_check_good "get_$flag ($key)" $ret 0
+ }
+
+ if { $expectfail == 1 } {
+ error_check_good allpassed $allpassed FALSE
+ puts "\t\tTest$tnum.$letter:\
+ returned at least one DB_BUFFER_SMALL (as expected)"
+ } else {
+ error_check_good allpassed $allpassed TRUE
+ puts "\t\tTest$tnum.$letter: succeeded (as expected)"
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $usecursor != 0 } {
+ error_check_good getcurs_close [$getcurs close] 0
+ }
+ }
+}
+
+proc t102_populate { db method nentries txnenv pad_bytes } {
+ source ./include.tcl
+
+ set did [open $dict]
+ set count 0
+ set txn ""
+ set pflags ""
+ set gflags " -recno "
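+	# The -recno flag makes the get below treat the key as a record
+	# number; it is used only to confirm that each item was stored.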
+
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key [expr $count + 1]
+ set datastr $str
+ # Create overflow pages only if method is not fixed-length.
+ if { [is_fixed_length $method] == 0 } {
+ append datastr [repeat "a" $pad_bytes]
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good $key:dbget [llength $ret] 1
+ incr count
+ }
+ close $did
+
+ # This will make debugging easier, and since the database is
+ # read-only from here out, it's cheap.
+ error_check_good db_sync [$db sync] 0
+}
+
diff --git a/db-4.8.30/test/test103.tcl b/db-4.8.30/test/test103.tcl
new file mode 100644
index 0000000..1dc056c
--- /dev/null
+++ b/db-4.8.30/test/test103.tcl
@@ -0,0 +1,222 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test103
+# TEST Test bulk get when record numbers wrap around.
+# TEST
+# TEST Load database with items starting before and ending after
+# TEST the record number wrap around point. Run bulk gets (-multi_key)
+# TEST with various buffer sizes and verify the contents returned match
+# TEST the results from a regular cursor get.
+# TEST
+# TEST Then delete items to create a sparse database and make sure it
+# TEST still works. Test both -multi and -multi_key since they behave
+# TEST differently.
+proc test103 { method {nentries 100} {start 4294967250} {tnum "103"} args} {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ puts "Test$tnum: $method ($args) Test of bulk get with wraparound."
+
+ if { [is_queueext $method] == 0 } {
+ puts "\tSkipping Test$tnum for method $method."
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open_noerr \
+ -create -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Find the pagesize so we can use it to size the buffer.
+ set stat [$db stat]
+ set pagesize [get_pagesize $stat]
+
+ set did [open $dict]
+
+ puts "\tTest$tnum.a: put/get loop"
+ set txn ""
+
+ # Here is the loop where we put each key/data pair
+ set count 0
+ set k [expr $start + 1]
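+	# With the default start (4294967250) and the default 100 entries,
+	# the keys stored are 4294967252 through 4294967295; the key then
+	# wraps to 1 and continues through 56.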
+ while { [gets $did str] != -1 && $count < $nentries } {
+ #
+		# We cannot use 'incr' because the key needs 64-bit
+		# arithmetic, which 'incr' does not handle; use 'expr' instead.
+ set k [expr $k + 1]
+ #
+ # Detect if we're more than 32 bits now. If so, wrap
+ # our key back to 1.
+ #
+ if { [expr $k > 0xffffffff] } {
+ set k 1
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$k [chop_data $method $str]}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Run tests in verbose mode for debugging.
+ set verbose 0
+
+ puts "\tTest$tnum.b: Bulk get with large buffer (retrieves all data)."
+ # Buffer is large enough that everything fits in a single get.
+ check_multi_recno $db [expr $pagesize * $nentries] multi_key $verbose
+
+ puts "\tTest$tnum.c: Bulk get with buffer = (2 x pagesize)."
+ # Buffer gets several items at a get, but not all.
+ check_multi_recno $db [expr $pagesize * 2] multi_key $verbose
+
+ # Skip tests if buffer would be smaller than allowed.
+ if { $pagesize >= 1024 } {
+ puts "\tTest$tnum.d: Bulk get with buffer = pagesize."
+ check_multi_recno $db $pagesize multi_key $verbose
+ }
+
+ if { $pagesize >= 2048 } {
+ puts "\tTest$tnum.e: Bulk get with buffer < pagesize\
+ (returns EINVAL)."
+ catch {
+ check_multi_recno $db [expr $pagesize / 2] \
+ multi_key $verbose
+ } res
+ error_check_good \
+ bufsize_less_than_pagesize [is_substr $res "invalid"] 1
+ }
+
+ # For a sparsely populated database, test with both -multi_key and
+	# -multi.  In any sort of record-numbered database, -multi does not
+	# return keys, so it returns a slot for every record, including
+	# deleted ones (with empty data).  -multi_key returns both keys and
+	# data, so it skips deleted items.
+ puts "\tTest$tnum.f: Delete every 10th item to create sparse database."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [ eval {$db cursor} $txn]
+ error_check_good cursor [is_valid_cursor $curs $db] TRUE
+
+ set count 0
+ for { set kd [$curs get -first] } { $count < $nentries } \
+ { set kd [$curs get -next] } {
+ if { [expr $count % 10 == 0] } {
+ error_check_good cdelete [$curs del] 0
+ }
+ incr count
+ }
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest$tnum.g: Sparse database, large buffer, multi_key."
+ check_multi_recno $db [expr $pagesize * $nentries] multi_key $verbose
+ puts "\tTest$tnum.h: Sparse database, large buffer, multi."
+ check_multi_recno $db [expr $pagesize * $nentries] multi $verbose
+
+ puts "\tTest$tnum.i: \
+ Sparse database, buffer = (2 x pagesize), multi_key."
+ check_multi_recno $db [expr $pagesize * 2] multi_key $verbose
+ puts "\tTest$tnum.j: Sparse database, buffer = (2 x pagesize), multi."
+ check_multi_recno $db [expr $pagesize * 2] multi $verbose
+
+ if { $pagesize >= 1024 } {
+ puts "\tTest$tnum.k: \
+ Sparse database, buffer = pagesize, multi_key."
+ check_multi_recno $db $pagesize multi_key $verbose
+ puts "\tTest$tnum.k: Sparse database, buffer = pagesize, multi."
+ check_multi_recno $db $pagesize multi $verbose
+ }
+
+ error_check_good db_close [$db close] 0
+}
+
+# The proc check_multi_recno is a modification of the utility routine
+# check_multi_key, specialized for recno methods.  We use this instead
+# of check_multi, even with the -multi flag, because the check_multi
+# utility assumes that dups are being used, which can't happen with
+# record-based methods.
+proc check_multi_recno { db size flag {verbose 0}} {
+ source ./include.tcl
+ set c [eval { $db cursor} ]
+ set m [eval { $db cursor} ]
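+	# Cursor c walks the database one item at a time; cursor m does
+	# the bulk (-multi/-multi_key) gets that are checked against it.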
+
+ set j 1
+
+ # Walk the database with -multi_key or -multi bulk get.
+ for {set d [$m get -first -$flag $size] } { [llength $d] != 0 } {
+ set d [$m get -next -$flag $size] } {
+ if {$verbose == 1 } {
+ puts "FETCH $j"
+ incr j
+ }
+ # For each bulk get return, compare the results to what we
+ # get by walking the db with an ordinary cursor get.
+ for {set i 0} { $i < [llength $d] } { incr i } {
+ set kd [lindex $d $i]
+ set k [lindex $kd 0]
+ set data [lindex $kd 1]
+ set len [string length $data]
+
+ if {$verbose == 1 } {
+ puts ">> $k << >> $len << "
+ }
+ # If we hit a deleted item in -multi, skip over it.
+ if { $flag == "multi" && $len == 0 } {
+ continue
+ }
+
+ set check [$c get -next]
+ set cd [lindex $check 0]
+ set ck [lindex $cd 0]
+ set cdata [lindex $cd 1]
+
+ error_check_good key $k $ck
+ error_check_good data_len $len [string length $cdata]
+ error_check_good data $data $cdata
+ }
+ }
+}
diff --git a/db-4.8.30/test/test106.tcl b/db-4.8.30/test/test106.tcl
new file mode 100644
index 0000000..a9177bd
--- /dev/null
+++ b/db-4.8.30/test/test106.tcl
@@ -0,0 +1,113 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test106
+# TEST	Producer/consumer test run under the deadlock detector.
+# TEST	Start one consumer and several producer processes (driven by
+# TEST	t106script.tcl), first to load the database and then to run
+# TEST	read-modify-write operations, in both ordered and random modes.
+proc test106 { method {nitems 100} {niter 200} {tnum "106"} args } {
+ source ./include.tcl
+ global dict
+ global rand_init
+
+ # Set random seed for use in t106script procs op2 and create_data.
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set pageargs ""
+ split_pageargs $args pageargs
+
+ if { [is_btree $method] != 1 } {
+ puts "\tTest$tnum: Skipping for method $method."
+ return
+ }
+
+ # Skip for specified pagesizes. This test runs at the native
+ # pagesize. (For SR #7964 testing, we may need to force
+ # to 8192.)
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test$tnum: Skipping for specific pagesizes"
+ return
+ }
+
+ # This test needs a txn-enabled environment. If one is not
+ # provided, create it.
+ #
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set env \
+ [eval {berkdb_env -create -home $testdir -txn} $pageargs]
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv != 1 } {
+ puts "Skipping test$tnum for non-txn environment."
+ return
+ }
+ set testdir [get_home $env]
+ }
+
+ cleanup $testdir $env
+
+ # The bulk of the work of this test is done in t106script.tcl.
+ # Here we kick off one consumer, then five producers, then sit
+ # back and wait for them to finish.
+ foreach order { ordered random } {
+ set nproducers 5
+
+ puts "\tTest$tnum.a: Start deadlock detector ($order)."
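+		# db_deadlock -a o resolves deadlocks by aborting the oldest
+		# locker; -t 5 re-runs detection every five seconds.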
+ set dpid [exec $util_path/db_deadlock -a o -v -t 5\
+ -h $testdir >& $testdir/dd.out &]
+
+ puts "\tTest$tnum.b: Start consumer process ($order)."
+ sentinel_init
+ set pidlist {}
+ set cpid [exec $tclsh_path $test_path/wrap.tcl t106script.tcl \
+ $testdir/t106script.log.cons.$order.1 $testdir WAIT \
+ 0 $nproducers $testdir/CONSUMERLOG 1 $tnum $order $niter \
+ $args &]
+ lappend pidlist $cpid
+
+ puts "\tTest$tnum.c: Initialize producers ($order)."
+ for { set p 1 } { $p <= $nproducers } { incr p } {
+ set ppid [exec $tclsh_path $test_path/wrap.tcl \
+ t106script.tcl \
+ $testdir/t106script.log.init.$order.$p \
+ $testdir INITIAL $nitems $nproducers \
+ $testdir/INITLOG.$p $p $tnum \
+ $order $niter $args &]
+ lappend pidlist $ppid
+ }
+
+ # Wait for all producers to be initialized before continuing
+ # to the RMW portion of the test.
+ watch_procs $pidlist 10
+
+ sentinel_init
+ set pidlist {}
+ puts "\tTest$tnum.d: Run producers in RMW mode ($order)."
+ for { set p 1 } { $p <= $nproducers } { incr p } {
+ set ppid [exec $tclsh_path $test_path/wrap.tcl \
+ t106script.tcl \
+ $testdir/t106script.log.prod.$order.$p \
+ $testdir PRODUCE $nitems $nproducers \
+ $testdir/PRODUCERLOG.$p $p $tnum \
+ $order $niter $args &]
+ lappend pidlist $ppid
+ }
+
+ watch_procs $pidlist 10
+ tclkill $dpid
+ }
+
+ # If this test created the env, close it.
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+ }
+}
diff --git a/db-4.8.30/test/test107.tcl b/db-4.8.30/test/test107.tcl
new file mode 100644
index 0000000..2911270
--- /dev/null
+++ b/db-4.8.30/test/test107.tcl
@@ -0,0 +1,168 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test107
+# TEST Test of read-committed (degree 2 isolation). [#8689]
+# TEST
+# TEST We set up a database. Open a read-committed transactional cursor and
+# TEST a regular transactional cursor on it. Position each cursor on one page,
+# TEST and do a put to a different page.
+# TEST
+# TEST Make sure that:
+# TEST - the put succeeds if we are using degree 2 isolation.
+# TEST - the put deadlocks within a regular transaction with
+# TEST a regular cursor.
+# TEST
+proc test107 { method args } {
+ source ./include.tcl
+ global fixed_len
+ global passwd
+ set tnum "107"
+
+ set pageargs ""
+ split_pageargs $args pageargs
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test$tnum skipping for env $env"
+ return
+ }
+
+ # We'll make the data pretty good sized so we can easily
+ # move to a different page. Make the data size a little
+ # smaller for fixed-length methods so it works with
+ # pagesize 512 tests.
+ set data_size 512
+ set orig_fixed_len $fixed_len
+ set fixed_len [expr $data_size - [expr $data_size / 8]]
+ set args [convert_args $method $args]
+ set encargs ""
+ set ddargs ""
+ set args [split_encargs $args encargs]
+ if { $encargs != "" } {
+ set ddargs " -P $passwd "
+ }
+ set omethod [convert_method $method]
+
+ puts "Test$tnum: Degree 2 Isolation Test ($method $args)"
+ set testfile test$tnum.db
+ env_cleanup $testdir
+
+ # Create the environment.
+ set timeout 10
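+	# The lock timeout is specified in microseconds, so lock waits
+	# expire almost immediately; the deadlock detector started below
+	# runs with the expire policy (-ae) to break them.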
+ set env [eval {berkdb_env -create -mode 0644 -lock \
+ -cachesize { 0 1048576 1 } \
+ -lock_timeout $timeout -txn} $pageargs $encargs -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Create the database.
+ set db [eval {berkdb_open -env $env -create -auto_commit\
+ -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest$tnum.a: put loop"
+ # The data doesn't need to change from key to key.
+ # Use numerical keys so we don't need special handling for
+ # record-based methods.
+ set origdata "data"
+ set len [string length $origdata]
+ set data [repeat $origdata [expr $data_size / $len]]
+ set nentries 200
+ set txn [$env txn]
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ set key $i
+ set ret [eval {$db put} \
+ -txn $txn {$key [chop_data $method $data]}]
+ error_check_good put:$db $ret 0
+ }
+ error_check_good txn_commit [$txn commit] 0
+
+ puts "\tTest$tnum.b: Start deadlock detector."
+ # Start up a deadlock detector so we can break self-deadlocks.
+ set dpid [eval {exec $util_path/db_deadlock} -v -ae -t 1.0 \
+ -h $testdir $ddargs >& $testdir/dd.out &]
+
+ puts "\tTest$tnum.c: Open txns and cursors."
+ # We can get degree 2 isolation with either a degree 2
+ # txn or a degree 2 cursor or both. However, the case
+ # of a regular txn and regular cursor should deadlock.
+ # We put this case last so it won't deadlock the cases
+ # which should succeed.
+ #
+ # Cursors and transactions are named according to
+ # whether they specify degree 2 (c2, t2) or not (c, t).
+ # Set up all four possibilities.
+ #
+ set t [$env txn]
+ error_check_good reg_txn_begin [is_valid_txn $t $env] TRUE
+ set t2 [$env txn -read_committed]
+ error_check_good deg2_txn_begin [is_valid_txn $t2 $env] TRUE
+
+ set c2t [$db cursor -txn $t -read_committed]
+ error_check_good valid_c2t [is_valid_cursor $c2t $db] TRUE
+ set ct2 [$db cursor -txn $t2]
+ error_check_good valid_ct2 [is_valid_cursor $ct2 $db] TRUE
+ set c2t2 [$db cursor -txn $t2 -read_committed]
+ error_check_good valid_c2t2 [is_valid_cursor $c2t2 $db] TRUE
+ set ct [$db cursor -txn $t]
+ error_check_good valid_ct [is_valid_cursor $ct $db] TRUE
+
+ set curslist [list $c2t $ct2 $c2t2 $ct]
+ set newdata newdata
+ set offpagekey [expr $nentries - 1]
+
+ # For one cursor at a time, read the first item in the
+ # database, then move to an item on a different page.
+ # Put a new value in the first item on the first page. This
+ # should work with degree 2 isolation and hang without it.
+ #
+ # Wrap the whole thing in a catch statement so we still get
+ # around to killing the deadlock detector and cleaning up
+ # even if the test fails.
+ #
+ puts "\tTest$tnum.d: Test for read-committed (degree 2 isolation)."
+ set status [catch {
+ foreach cursor $curslist {
+ set retfirst [$cursor get -first]
+ set firstkey [lindex [lindex $retfirst 0] 0]
+ set ret [$cursor get -set $offpagekey]
+ error_check_good cursor_off_page \
+ [lindex [lindex $ret 0] 0] $offpagekey
+ if { [catch {eval {$db put} \
+ $firstkey [chop_data $method $newdata]} res]} {
+ error_check_good error_is_deadlock \
+ [is_substr $res DB_LOCK_DEADLOCK] 1
+ error_check_good right_cursor_failed $cursor $ct
+ } else {
+ set ret [lindex [lindex [$db get $firstkey] 0] 1]
+ error_check_good data_changed \
+ $ret [pad_data $method $newdata]
+ error_check_bad right_cursor_succeeded $cursor $ct
+ }
+ error_check_good close_cursor [$cursor close] 0
+ }
+ } res]
+ if { $status != 0 } {
+ puts $res
+ }
+
+ # Smoke test for db_stat -txn -read_committed.
+ puts "\tTest$tnum.e: Smoke test for db_stat -txn -read_committed"
+ if { [catch {set statret [$db stat -txn $t -read_committed]} res] } {
+ puts "FAIL: db_stat -txn -read_committed returned $res"
+ }
+
+ # End deadlock detection and clean up handles
+ puts "\tTest$tnum.f: Clean up."
+ tclkill $dpid
+ set fixed_len $orig_fixed_len
+ error_check_good t_commit [$t commit] 0
+ error_check_good t2_commit [$t2 commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+}
diff --git a/db-4.8.30/test/test109.tcl b/db-4.8.30/test/test109.tcl
new file mode 100644
index 0000000..6c6b3c5
--- /dev/null
+++ b/db-4.8.30/test/test109.tcl
@@ -0,0 +1,322 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test109
+# TEST
+# TEST Test of sequences.
+proc test109 { method {tnum "109"} args } {
+ source ./include.tcl
+ global rand_init
+ global fixed_len
+ global errorCode
+
+ set eindex [lsearch -exact $args "-env"]
+ set txnenv 0
+ set rpcenv 0
+ set sargs " -thread "
+
+ if { [is_partitioned $args] == 1 } {
+ puts "Test109 skipping for partitioned $method"
+ return
+ }
+ if { $eindex == -1 } {
+ set env NULL
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test$tnum: skipping for RPC"
+ return
+ }
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ # Fixed_len must be increased from the default to
+ # accommodate fixed-record length methods.
+ set orig_fixed_len $fixed_len
+ set fixed_len 128
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ error_check_good random_seed [berkdb srand $rand_init] 0
+
+ # Test with in-memory dbs, regular dbs, and subdbs.
+ foreach filetype { subdb regular in-memory } {
+ puts "Test$tnum: $method ($args) Test of sequences ($filetype)."
+
+ # Skip impossible combinations.
+ if { $filetype == "subdb" && [is_queue $method] } {
+ puts "Skipping $filetype test for method $method."
+ continue
+ }
+ if { $filetype == "in-memory" && [is_queueext $method] } {
+ puts "Skipping $filetype test for method $method."
+ continue
+ }
+
+ # Reinitialize file name for each file type, then adjust.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ } else {
+ set testfile test$tnum.db
+ set testdir [get_home $env]
+ }
+ if { $filetype == "subdb" } {
+ lappend testfile SUBDB
+ }
+ if { $filetype == "in-memory" } {
+ set testfile ""
+ }
+
+ cleanup $testdir $env
+
+ # Make the key numeric so we can test record-based methods.
+ set key 1
+
+ # Open a noerr db, since we expect errors.
+ set db [eval {berkdb_open_noerr \
+ -create -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest$tnum.a: Max must be greater than min."
+ set errorCode NONE
+ catch {set seq [eval {berkdb sequence} -create $sargs \
+ -init 0 -min 100 -max 0 $db $key]} res
+ error_check_good max>min [is_substr $errorCode EINVAL] 1
+
+ puts "\tTest$tnum.b: Init can't be out of the min-max range."
+ set errorCode NONE
+ catch {set seq [eval {berkdb sequence} -create $sargs \
+ -init 101 -min 0 -max 100 $db $key]} res
+ error_check_good init [is_substr $errorCode EINVAL] 1
+
+ # Test increment and decrement.
+ set min 0
+ set max 100
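+		# Note that the braced list below is not substituted here:
+		# the literal strings $min and $max are passed to
+		# test_sequence and resolved there (via eval/expr) against
+		# its own min and max arguments.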
+ foreach { init inc } { $min -inc $max -dec } {
+ puts "\tTest$tnum.c: Test for overflow error with $inc."
+ test_sequence $env $db $key $min $max $init $inc
+ }
+
+ # Test cachesize without wrap. Make sure to test both
+ # cachesizes that evenly divide the number of items in the
+ # sequence, and that leave unused elements at the end.
+ set min 0
+ set max 99
+ set init 1
+ set cachesizes [list 2 7 11]
+ foreach csize $cachesizes {
+ foreach inc { -inc -dec } {
+ puts "\tTest$tnum.d:\
+ -cachesize $csize, $inc, no wrap."
+ test_sequence $env $db $key \
+ $min $max $init $inc $csize
+ }
+ }
+ error_check_good db_close [$db close] 0
+
+ # Open a regular db; we expect success on the rest of the tests.
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Test increment and decrement with wrap. Cross from negative
+ # to positive integers.
+ set min -50
+ set max 99
+ set wrap "-wrap"
+ set csize 1
+ foreach { init inc } { $min -inc $max -dec } {
+ puts "\tTest$tnum.e: Test wrapping with $inc."
+ test_sequence $env $db $key \
+ $min $max $init $inc $csize $wrap
+ }
+
+ # Test cachesize with wrap.
+ set min 0
+ set max 99
+ set init 0
+ set wrap "-wrap"
+ foreach csize $cachesizes {
+ puts "\tTest$tnum.f: Test -cachesize $csize with wrap."
+ test_sequence $env $db $key \
+ $min $max $init $inc $csize $wrap
+ }
+
+ # Test multiple handles on the same sequence.
+ foreach csize $cachesizes {
+ puts "\tTest$tnum.g:\
+ Test multiple handles (-cachesize $csize) with wrap."
+ test_sequence $env $db $key \
+ $min $max $init $inc $csize $wrap 1
+ }
+ error_check_good db_close [$db close] 0
+ }
+ set fixed_len $orig_fixed_len
+ return
+}
+
+proc test_sequence { env db key min max init \
+ {inc "-inc"} {csize 1} {wrap "" } {second_handle 0} } {
+ global rand_init
+ global errorCode
+
+ set txn ""
+ set txnenv 0
+ if { $env != "NULL" } {
+ set txnenv [is_txnenv $env]
+ }
+
+ set sargs " -thread "
+
+ # The variable "skip" is the cachesize with a direction.
+ set skip $csize
+ if { $inc == "-dec" } {
+ set skip [expr $csize * -1]
+ }
+
+ # The "limit" is the closest number to the end of the
+ # sequence we can ever see.
+ set limit [expr [expr $max + 1] - $csize]
+ if { $inc == "-dec" } {
+ set limit [expr [expr $min - 1] + $csize]
+ }
+
+ # The number of items in the sequence.
+ set n [expr [expr $max - $min] + 1]
+
+ # Calculate the number of values returned in the first
+ # cycle, and in all other cycles.
+ if { $inc == "-inc" } {
+ set firstcyclehits \
+ [expr [expr [expr $max - $init] + 1] / $csize]
+ } elseif { $inc == "-dec" } {
+ set firstcyclehits \
+ [expr [expr [expr $init - $min] + 1] / $csize]
+ } else {
+ puts "FAIL: unknown inc flag $inc"
+ }
+ set hitspercycle [expr $n / $csize]
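+	# Worked example (hypothetical values): min 0, max 99, init 1,
+	# csize 7, -inc gives n = 100, firstcyclehits = (99 - 1 + 1) / 7 = 14
+	# and hitspercycle = 100 / 7 = 14; with -wrap, values after the
+	# first cycle restart from min.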
+
+ # Create the sequence.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set seq [eval {berkdb sequence} -create $sargs -cachesize $csize \
+ $wrap -init $init -min $min -max $max $txn $inc $db $key]
+ error_check_good is_valid_seq [is_valid_seq $seq] TRUE
+ if { $second_handle == 1 } {
+ set seq2 [eval {berkdb sequence} -create $sargs $txn $db $key]
+ error_check_good is_valid_seq2 [is_valid_seq $seq2] TRUE
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ # Exercise get options.
+ set getdb [$seq get_db]
+ error_check_good seq_get_db $getdb $db
+
+ set flags [$seq get_flags]
+ set exp_flags [list $inc $wrap]
+ foreach item $exp_flags {
+ if { [llength $item] == 0 } {
+ set idx [lsearch -exact $exp_flags $item]
+ set exp_flags [lreplace $exp_flags $idx $idx]
+ }
+ }
+ error_check_good get_flags $flags $exp_flags
+
+ set range [$seq get_range]
+ error_check_good get_range_min [lindex $range 0] $min
+ error_check_good get_range_max [lindex $range 1] $max
+
+ set cache [$seq get_cachesize]
+ error_check_good get_cachesize $cache $csize
+
+ # Within the loop, for each successive seq get we calculate
+ # the value we expect to receive, then do the seq get and
+ # compare.
+ #
+ # Always test some multiple of the number of items in the
+ # sequence; this tests overflow and wrap-around.
+ #
+ set mult 2
+ for { set i 0 } { $i < [expr $n * $mult] } { incr i } {
+ #
+ # Calculate expected return value.
+ #
+ # On the first cycle, start from init.
+ set expected [expr $init + [expr $i * $skip]]
+ if { $i >= $firstcyclehits && $wrap != "-wrap" } {
+ set expected "overflow"
+ }
+
+ # On second and later cycles, start from min or max.
+ # We do a second cycle only if wrapping is specified.
+ if { $wrap == "-wrap" } {
+ if { $inc == "-inc" && $expected > $limit } {
+ set j [expr $i - $firstcyclehits]
+ while { $j >= $hitspercycle } {
+ set j [expr $j - $hitspercycle]
+ }
+ set expected [expr $min + [expr $j * $skip]]
+ }
+
+ if { $inc == "-dec" && $expected < $limit } {
+ set j [expr $i - $firstcyclehits]
+ while { $j >= $hitspercycle } {
+ set j [expr $j - $hitspercycle]
+ }
+ set expected [expr $max + [expr $j * $skip]]
+ }
+ }
+
+ # Get return value. If we've got a second handle, choose
+ # randomly which handle does the seq get.
+ if { $env != "NULL" && [is_txnenv $env] } {
+ set syncarg " -nosync "
+ } else {
+ set syncarg ""
+ }
+ set errorCode NONE
+ if { $second_handle == 0 } {
+ catch {eval {$seq get} $syncarg $csize} res
+ } elseif { [berkdb random_int 0 1] == 0 } {
+ catch {eval {$seq get} $syncarg $csize} res
+ } else {
+ catch {eval {$seq2 get} $syncarg $csize} res
+ }
+
+ # Compare expected to actual value.
+ if { $expected == "overflow" } {
+ error_check_good overflow [is_substr $errorCode EINVAL] 1
+ } else {
+ error_check_good seq_get_wrap $res $expected
+ }
+ }
+
+	# The sequence itself is destroyed with 'seq remove'; if we opened
+	# a second handle, it just needs to be closed first.
+ if { $second_handle == 1 } {
+ error_check_good seq2_close [$seq2 close] 0
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good seq_remove [eval {$seq remove} $txn] 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+}
diff --git a/db-4.8.30/test/test110.tcl b/db-4.8.30/test/test110.tcl
new file mode 100644
index 0000000..f989779
--- /dev/null
+++ b/db-4.8.30/test/test110.tcl
@@ -0,0 +1,168 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2004-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test110
+# TEST Partial get test with duplicates.
+# TEST
+# TEST For hash and btree, create and populate a database
+# TEST with dups. Randomly selecting offset and length,
+# TEST retrieve data from each record and make sure we
+# TEST get what we expect.
+proc test110 { method {nentries 10000} {ndups 3} args } {
+ global rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test110 skipping for method $method"
+ return
+ }
+
+ # Btree with compression does not support unsorted duplicates.
+ if { [is_compressed $args] == 1 } {
+ puts "Test110 skipping for btree with compression."
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test110.db
+ set env NULL
+ } else {
+ set testfile test110.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test110: $method ($args) $nentries partial get test with duplicates"
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} -dup $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ berkdb srand $rand_init
+
+ set txn ""
+ set count 0
+
+ puts "\tTest110.a: put/get loop"
+ for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \
+ { incr i } {
+
+ set key $str
+ set repl [berkdb random_int 1 100]
+ set kvals($key) $repl
+ set data [chop_data $method [replicate $str $repl]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set j 0 } { $j < $ndups } { incr j } {
+ set ret [eval {$db put} $txn {$key $j.$data}]
+ error_check_good dbput:$key:$j $ret 0
+ }
+
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set ret [$dbc get -set $key]
+
+ set j 0
+ for { set dbt [$dbc get -current] } \
+ { $j < $ndups } \
+ { set dbt [$dbc get -next] } {
+ set d [lindex [lindex $dbt 0] 1]
+ error_check_good dupget:$key:$j $d [pad_data $method $j.$data]
+ incr j
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ close $did
+
+ puts "\tTest110.b: partial get loop"
+ set did [open $dict]
+ for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \
+ { incr i } {
+ set key $str
+
+ set data [pad_data $method [replicate $str $kvals($key)]]
+ set j 0
+
+ # Set up cursor. We will use the cursor to walk the dups.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # Position cursor at the first of the dups.
+ set ret [$dbc get -set $key]
+
+ for { set dbt [$dbc get -current] } \
+ { $j < $ndups } \
+ { set dbt [$dbc get -next] } {
+
+ set dupdata $j.$data
+ set length [expr [string length $dupdata]]
+ set maxndx [expr $length + 1]
+
+ if { $maxndx > 0 } {
+ set beg [berkdb random_int 0 [expr $maxndx - 1]]
+ set len [berkdb random_int 0 [expr $maxndx * 2]]
+ } else {
+ set beg 0
+ set len 0
+ }
+
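+			# A cursor get with -partial {beg len} returns len
+			# bytes of the data item, starting at byte offset beg.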
+ set ret [eval {$dbc get} -current \
+ {-partial [list $beg $len]}]
+
+ # In order for tcl to handle this, we have to overwrite the
+ # last character with a NULL. That makes the length one less
+ # than we expect.
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good dbget_key $k $key
+ error_check_good dbget_data $d \
+ [string range $dupdata $beg [expr $beg + $len - 1]]
+ incr j
+ }
+
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
diff --git a/db-4.8.30/test/test111.tcl b/db-4.8.30/test/test111.tcl
new file mode 100644
index 0000000..836f1fa
--- /dev/null
+++ b/db-4.8.30/test/test111.tcl
@@ -0,0 +1,370 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test111
+# TEST Test database compaction.
+# TEST
+# TEST Populate a database. Remove a high proportion of entries.
+# TEST Dump and save contents. Compact the database, dump again,
+# TEST and make sure we still have the same contents.
+# TEST Add back some entries, delete more entries (this time by
+# TEST cursor), dump, compact, and do the before/after check again.
+
+proc test111 { method {nentries 10000} {tnum "111"} args } {
+
+ # Compaction is an option for btree and recno databases only.
+ if { [is_hash $method] == 1 || [is_queue $method] == 1 } {
+ puts "Skipping test$tnum for method $method."
+ return
+ }
+
+ # If a page size was specified, find out what it is. Pages
+ # might not be freed in the case of really large pages (64K)
+ # but we still want to run this test just to make sure
+ # nothing funny happens.
+ set pagesize 0
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ incr pgindex
+ set pagesize [lindex $args $pgindex]
+ }
+
+ source ./include.tcl
+ global rand_init
+ error_check_good set_random_seed [berkdb srand $rand_init] 0
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_partition_callback $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set basename $testdir/test$tnum
+ set env NULL
+ } else {
+ set basename test$tnum
+ incr eindex
+ set env [lindex $args $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test$tnum: ($method $args) Database compaction."
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set splitopts { "" "-revsplitoff" }
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ } else {
+ set checkfunc test001.check
+ }
+
+ foreach splitopt $splitopts {
+ set testfile $basename.db
+ if { $splitopt == "-revsplitoff" } {
+ set testfile $basename.rev.db
+ if { [is_record_based $method] == 1 } {
+ puts "Skipping\
+ -revsplitoff option for method $method."
+ continue
+ }
+ }
+ set did [open $dict]
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest$tnum.a: Create and populate database ($splitopt)."
+ set db [eval {berkdb_open -create \
+ -mode 0644} $splitopt $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ global kvals
+
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+
+ set ret [eval \
+ {$db put} $txn {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ incr count
+
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ close $did
+ error_check_good db_sync [$db sync] 0
+
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ set filename $testdir/$testfile
+ } else {
+ set filename $testfile
+ }
+ set size1 [file size $filename]
+ set free1 [stat_field $db stat "Pages on freelist"]
+ set leaf1 [stat_field $db stat "Leaf pages"]
+ set internal1 [stat_field $db stat "Internal pages"]
+
+ # Delete between 1 and maxdelete items, then skip over between
+ # 1 and maxskip items. This is to make the data bunchy,
+ # so we sometimes follow the code path where merging is
+ # done record by record, and sometimes the path where
+ # the whole page is merged at once.
+
+ puts "\tTest$tnum.b: Delete most entries from database."
+ set did [open $dict]
+ set count [expr $nentries - 1]
+ set maxskip 4
+ set maxdelete 48
+
+ # Since rrecno and rbtree renumber, we delete starting at
+ # nentries and working down to 0.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ while { [gets $did str] != -1 && $count > 0 } {
+
+ # Delete a random number of successive items.
+ set ndeletes [berkdb random_int 1 $maxdelete]
+ set target [expr $count - $ndeletes]
+ while { [expr $count > $target] && $count > 0 } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key [gets $did]
+ }
+
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good del $ret 0
+ incr count -1
+ }
+ # Skip over a random smaller number of items.
+ set skip [berkdb random_int 1 [expr $maxskip]]
+ set target [expr $count - $skip]
+ while { [expr $count > $target] && $count > 0 } {
+ incr count -1
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ puts "\tTest$tnum.c: Do a dump_file on contents."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ puts "\tTest$tnum.d: Compact and verify database."
+ for {set commit 0} {$commit <= $txnenv} {incr commit} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval $db compact $txn -freespace]
+ if { $txnenv == 1 } {
+ if { $commit == 0 } {
+ puts "\tTest$tnum.d: Aborting."
+ error_check_good txn_abort [$t abort] 0
+ } else {
+ puts "\tTest$tnum.d: Committing."
+ error_check_good txn_commit [$t commit] 0
+ }
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good verify_dir \
+ [verify_dir $testdir "" 0 0 $nodump ] 0
+ }
+
+ set size2 [file size $filename]
+ set free2 [stat_field $db stat "Pages on freelist"]
+ set leaf2 [stat_field $db stat "Leaf pages"]
+ set internal2 [stat_field $db stat "Internal pages"]
+
+ # The sum of internal pages, leaf pages, and pages freed
+ # should decrease on compaction, indicating that pages
+ # have been freed to the file system.
+ set sum1 [expr $free1 + $leaf1 + $internal1]
+ set sum2 [expr $free2 + $leaf2 + $internal2]
+ error_check_good pages_freed [expr $sum1 > $sum2] 1
+
+ # The on-disk file size should be smaller.
+ #### We should look at the partitioned files #####
+ if { [is_partitioned $args] == 0 } {
+ set reduction .96
+ error_check_good \
+ file_size [expr [expr $size1 * $reduction] > $size2] 1
+ }
+
+ puts "\tTest$tnum.e: Contents are the same after compaction."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t2
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ error_check_good filecmp [filecmp $t1 $t2] 0
+
+ puts "\tTest$tnum.f: Add more entries to database."
+ # Use integers as keys instead of strings, just to mix it up
+ # a little.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 1 } { $i < $nentries } { incr i } {
+ set key $i
+ set str $i
+ set ret [eval \
+ {$db put} $txn {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ set size3 [file size $filename]
+ set free3 [stat_field $db stat "Pages on freelist"]
+ set leaf3 [stat_field $db stat "Leaf pages"]
+ set internal3 [stat_field $db stat "Internal pages"]
+
+ puts "\tTest$tnum.g: Remove more entries, this time by cursor."
+ set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+
+ # Delete all items except those evenly divisible by
+ # $maxdelete -- so the db is nearly empty.
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 }\
+ { set dbt [$dbc get -next] ; incr count } {
+ if { [expr $count % $maxdelete] != 0 } {
+ error_check_good dbc_del [$dbc del] 0
+ }
+ }
+
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ puts "\tTest$tnum.h: Save contents."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+
+ puts "\tTest$tnum.i: Compact and verify database again."
+ for {set commit 0} {$commit <= $txnenv} {incr commit} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval $db compact $txn -freespace]
+ if { $txnenv == 1 } {
+ if { $commit == 0 } {
+				puts "\tTest$tnum.i: Aborting."
+				error_check_good txn_abort [$t abort] 0
+				} else {
+				puts "\tTest$tnum.i: Committing."
+ error_check_good txn_commit [$t commit] 0
+ }
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good verify_dir \
+ [verify_dir $testdir "" 0 0 $nodump ] 0
+ }
+
+ set size4 [file size $filename]
+ set free4 [stat_field $db stat "Pages on freelist"]
+ set leaf4 [stat_field $db stat "Leaf pages"]
+ set internal4 [stat_field $db stat "Internal pages"]
+
+ # The sum of internal pages, leaf pages, and pages freed
+ # should decrease on compaction, indicating that pages
+ # have been freed to the file system.
+ set sum3 [expr $free3 + $leaf3 + $internal3]
+ set sum4 [expr $free4 + $leaf4 + $internal4]
+ error_check_good pages_freed [expr $sum3 > $sum4] 1
+
+ # File should be smaller as well.
+ #### We should look at the partitioned files #####
+ if { [is_partitioned $args] == 0 } {
+ error_check_good \
+ file_size [expr [expr $size3 * $reduction] > $size4] 1
+ }
+
+ puts "\tTest$tnum.j: Contents are the same after compaction."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t2
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good filecmp [filecmp $t1 $t2] 0
+
+ error_check_good db_close [$db close] 0
+ close $did
+ }
+}
diff --git a/db-4.8.30/test/test112.tcl b/db-4.8.30/test/test112.tcl
new file mode 100644
index 0000000..bfcc793
--- /dev/null
+++ b/db-4.8.30/test/test112.tcl
@@ -0,0 +1,285 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test112
+# TEST Test database compaction with a deep tree.
+# TEST
+# TEST This is a lot like test111, but with a large number of
+# TEST entries and a small page size to make the tree deep.
+# TEST To make it simple we use numerical keys all the time.
+# TEST
+# TEST Dump and save contents. Compact the database, dump again,
+# TEST and make sure we still have the same contents.
+# TEST Add back some entries, delete more entries (this time by
+# TEST cursor), dump, compact, and do the before/after check again.
+
+proc test112 { method {nentries 80000} {tnum "112"} args } {
+ source ./include.tcl
+ global alphabet
+
+ # Compaction is an option for btree and recno databases only.
+ if { [is_hash $method] == 1 || [is_queue $method] == 1 } {
+ puts "Skipping test$tnum for method $method."
+ return
+ }
+
+ # Skip for specified pagesizes. This test uses a small
+ # pagesize to generate a deep tree.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test$tnum: Skipping for specific pagesizes"
+ return
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_partition_callback $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ set txn ""
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test$tnum: $method ($args) Database compaction with deep tree."
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create\
+ -pagesize 512 -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ } else {
+ set checkfunc test001.check
+ }
+
+ puts "\tTest$tnum.a: Populate database."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ set key $i
+ set str $i.$alphabet
+
+ set ret [eval \
+ {$db put} $txn {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ set filename $testdir/$testfile
+ } else {
+ set filename $testfile
+ }
+ set size1 [file size $filename]
+ set levels [stat_field $db stat "Levels"]
+ error_check_good enough_levels [expr $levels >= 4] 1
+ set free1 [stat_field $db stat "Pages on freelist"]
+
+ puts "\tTest$tnum.b: Delete most entries from database."
+ # Leave every nth item. Since rrecno renumbers, we
+ # delete starting at nentries and working down to 0.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i $nentries } { $i > 0 } { incr i -1 } {
+ set key $i
+
+ # Leave every n'th item.
+ set n 121
+ if { [expr $i % $n] != 0 } {
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good del $ret 0
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ puts "\tTest$tnum.c: Do a dump_file on contents."
+ dump_file $db "" $t1
+
+ puts "\tTest$tnum.d: Compact database."
+ for {set commit 0} {$commit <= $txnenv} {incr commit} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval $db compact $txn -freespace]
+ if { $txnenv == 1 } {
+ if { $commit == 0 } {
+ puts "\tTest$tnum.d: Aborting."
+ error_check_good txn_abort [$t abort] 0
+ } else {
+ puts "\tTest$tnum.d: Committing."
+ error_check_good txn_commit [$t commit] 0
+ }
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good verify_dir \
+ [ verify_dir $testdir "" 0 0 $nodump] 0
+ }
+
+ set size2 [file size $filename]
+ set free2 [stat_field $db stat "Pages on freelist"]
+
+ # The on-disk file size should be significantly smaller.
+	#### We should look at the partitioned files #####
+	if { [is_partitioned $args] == 0 } {
+		set reduction .80
+		error_check_good file_size [expr [expr $size1 * $reduction] > $size2] 1
+	}
+
+ # Pages should be freed for all methods except maybe
+ # record-based non-queue methods. Even with recno, the
+ # number of free pages may not decline.
+
+ if { [is_record_based $method] == 1 } {
+ error_check_good pages_freed [expr $free2 >= $free1] 1
+ } else {
+ error_check_good pages_freed [expr $free2 > $free1] 1
+ }
+
+ # Also, we should have reduced the number of levels.
+ set newlevels [stat_field $db stat "Levels"]
+ error_check_good fewer_levels [expr $newlevels < $levels ] 1
+
+ puts "\tTest$tnum.e: Check that contents are the same after compaction."
+ dump_file $db "" $t2
+ error_check_good filecmp [filecmp $t1 $t2] 0
+
+ puts "\tTest$tnum.f: Add more entries to database."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 1 } { $i < $nentries } { incr i } {
+ set key $i
+ set str $i.$alphabet
+ set ret [eval \
+ {$db put} $txn {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ set size3 [file size $filename]
+ set free3 [stat_field $db stat "Pages on freelist"]
+
+ puts "\tTest$tnum.g: Remove more entries, this time by cursor."
+ set i 0
+ set n 11
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 }\
+ { set dbt [$dbc get -next] ; incr i } {
+ if { [expr $i % $n] != 0 } {
+ error_check_good dbc_del [$dbc del] 0
+ }
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ puts "\tTest$tnum.h: Save contents."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ puts "\tTest$tnum.i: Compact database again."
+ for {set commit 0} {$commit <= $txnenv} {incr commit} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval $db compact $txn -freespace]
+ if { $txnenv == 1 } {
+ if { $commit == 0 } {
+				puts "\tTest$tnum.i: Aborting."
+ error_check_good txn_abort [$t abort] 0
+ } else {
+				puts "\tTest$tnum.i: Committing."
+ error_check_good txn_commit [$t commit] 0
+ }
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good verify_dir \
+ [ verify_dir $testdir "" 0 0 $nodump] 0
+ }
+
+ set size4 [file size $filename]
+ set free4 [stat_field $db stat "Pages on freelist"]
+
+#### We should look at the partitioned files #####
+if { [is_partitioned $args] == 0 } {
+ error_check_good file_size [expr [expr $size3 * $reduction] > $size4] 1
+}
+ if { [is_record_based $method] == 1 } {
+ error_check_good pages_freed [expr $free4 >= $free3] 1
+ } else {
+ error_check_good pages_freed [expr $free4 > $free3] 1
+ }
+
+ puts "\tTest$tnum.j: Check that contents are the same after compaction."
+ dump_file $db "" $t2
+ error_check_good filecmp [filecmp $t1 $t2] 0
+
+ error_check_good db_close [$db close] 0
+}
+
diff --git a/db-4.8.30/test/test113.tcl b/db-4.8.30/test/test113.tcl
new file mode 100644
index 0000000..c0b3364
--- /dev/null
+++ b/db-4.8.30/test/test113.tcl
@@ -0,0 +1,267 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test113
+# TEST Test database compaction with duplicates.
+# TEST
+# TEST This is essentially test111 with duplicates.
+# TEST To make it simple we use numerical keys all the time.
+# TEST
+# TEST Dump and save contents. Compact the database, dump again,
+# TEST and make sure we still have the same contents.
+# TEST Add back some entries, delete more entries (this time by
+# TEST cursor), dump, compact, and do the before/after check again.
+
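+# The sketch below is a minimal illustration of the compact-with-dups
+# pattern this test exercises; the helper name and arguments are
+# illustrative only and are not used by the test itself.
+proc test113_compact_sketch { db } {
+	# Compact the btree, return empty pages to the file system,
+	# and flush the cache so the file size reflects the change.
+	set ret [$db compact -freespace]
+	error_check_good sketch_sync [$db sync] 0
+	return $ret
+}
+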
+proc test113 { method {nentries 10000} {ndups 5} {tnum "113"} args } {
+ source ./include.tcl
+ global alphabet
+
+	# Compaction with duplicates is supported only for btree.
+ if { [is_btree $method] != 1 } {
+ puts "Skipping test$tnum for method $method."
+ return
+ }
+
+ # Skip for specified pagesizes. This test uses a small
+ # pagesize to generate a deep tree.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test$tnum: Skipping for specific pagesizes"
+ return
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_partition_callback $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set eindex [lsearch -exact $args "-env"]
+ set txnenv 0
+ set txn ""
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test$tnum: $method ($args)\
+ Database compaction with duplicates."
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -pagesize 512 \
+ -dup -dupsort -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest$tnum.a: Populate database with dups."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ set key $i
+ for { set j 1 } { $j <= $ndups } { incr j } {
+ set str $i.$j.$alphabet
+ set ret [eval \
+ {$db put} $txn {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ set filename $testdir/$testfile
+ } else {
+ set filename $testfile
+ }
+ set size1 [file size $filename]
+ set free1 [stat_field $db stat "Pages on freelist"]
+
+ puts "\tTest$tnum.b: Delete most entries from database."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i $nentries } { $i >= 0 } { incr i -1 } {
+ set key $i
+
+ # Leave every n'th item.
+ set n 7
+ if { [expr $i % $n] != 0 } {
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good del $ret 0
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ puts "\tTest$tnum.c: Do a dump_file on contents."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ puts "\tTest$tnum.d: Compact database."
+ for {set commit 0} {$commit <= $txnenv} {incr commit} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval $db compact $txn -freespace]
+ if { $txnenv == 1 } {
+ if { $commit == 0 } {
+ puts "\tTest$tnum.d: Aborting."
+ error_check_good txn_abort [$t abort] 0
+ } else {
+ puts "\tTest$tnum.d: Committing."
+ error_check_good txn_commit [$t commit] 0
+ }
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good verify_dir \
+ [ verify_dir $testdir "" 0 0 $nodump] 0
+ }
+
+ set size2 [file size $filename]
+ set free2 [stat_field $db stat "Pages on freelist"]
+ error_check_good pages_freed [expr $free2 > $free1] 1
+#### We should look at the partitioned files #####
+if { [is_partitioned $args] == 0 } {
+ set reduction .80
+ error_check_good file_size [expr [expr $size1 * $reduction] > $size2] 1
+}
+
+ puts "\tTest$tnum.e: Check that contents are the same after compaction."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t2
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good filecmp [filecmp $t1 $t2] 0
+
+ puts "\tTest$tnum.f: Add more entries to database."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ set key $i
+ for { set j 1 } { $j <= $ndups } { incr j } {
+ set str $i.$j.$alphabet.extra
+ set ret [eval \
+ {$db put} $txn {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ set size3 [file size $filename]
+ set free3 [stat_field $db stat "Pages on freelist"]
+
+ puts "\tTest$tnum.g: Remove more entries, this time by cursor."
+ set i 0
+ set n 11
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 }\
+ { set dbt [$dbc get -next] ; incr i } {
+
+ if { [expr $i % $n] != 0 } {
+ error_check_good dbc_del [$dbc del] 0
+ }
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ puts "\tTest$tnum.h: Save contents."
+ dump_file $db "" $t1
+
+ puts "\tTest$tnum.i: Compact database again."
+ for {set commit 0} {$commit <= $txnenv} {incr commit} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval $db compact $txn -freespace]
+ if { $txnenv == 1 } {
+ if { $commit == 0 } {
+				puts "\tTest$tnum.i: Aborting."
+ error_check_good txn_abort [$t abort] 0
+ } else {
+				puts "\tTest$tnum.i: Committing."
+ error_check_good txn_commit [$t commit] 0
+ }
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good verify_dir \
+ [ verify_dir $testdir "" 0 0 $nodump] 0
+ }
+
+ set size4 [file size $filename]
+ set free4 [stat_field $db stat "Pages on freelist"]
+ error_check_good pages_freed [expr $free4 > $free3] 1
+#### We should look at the partitioned files #####
+if { [is_partitioned $args] == 0 } {
+ error_check_good file_size [expr [expr $size3 * $reduction] > $size4] 1
+}
+
+ puts "\tTest$tnum.j: Check that contents are the same after compaction."
+ dump_file $db "" $t2
+ error_check_good filecmp [filecmp $t1 $t2] 0
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/db-4.8.30/test/test114.tcl b/db-4.8.30/test/test114.tcl
new file mode 100644
index 0000000..57ab378
--- /dev/null
+++ b/db-4.8.30/test/test114.tcl
@@ -0,0 +1,339 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test114
+# TEST Test database compaction with overflows.
+# TEST
+# TEST Populate a database. Remove a high proportion of entries.
+# TEST Dump and save contents. Compact the database, dump again,
+# TEST and make sure we still have the same contents.
+# TEST Add back some entries, delete more entries (this time by
+# TEST cursor), dump, compact, and do the before/after check again.
+
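+# A minimal sketch (names are illustrative) of how this test provokes
+# overflow pages: with a 512-byte page, each 2600-byte value spills
+# onto overflow pages, which compaction should later give back.
+proc test114_overflow_put_sketch { db txn key str } {
+	# str is expected to be much larger than the page size.
+	set ret [eval {$db put} $txn {$key $str}]
+	error_check_good sketch_put $ret 0
+	return $ret
+}
+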
+proc test114 { method {nentries 10000} {tnum "114"} args } {
+ source ./include.tcl
+ global alphabet
+
+ # Compaction is an option for btree and recno databases only.
+ if { [is_hash $method] == 1 || [is_queue $method] == 1 } {
+ puts "Skipping test$tnum for method $method."
+ return
+ }
+
+ # We run with a small page size to force overflows. Skip
+ # testing for specified page size.
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test$tnum: Skipping for specific pagesize."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_partition_callback $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set basename $testdir/test$tnum
+ set env NULL
+ } else {
+ set basename test$tnum
+ incr eindex
+ set env [lindex $args $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit"
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test$tnum: ($method $args) Database compaction with overflows."
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set splitopts { "" "-revsplitoff" }
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ } else {
+ set checkfunc test001.check
+ }
+
+ foreach splitopt $splitopts {
+ set testfile $basename.db
+ if { $splitopt == "-revsplitoff" } {
+ set testfile $basename.rev.db
+ if { [is_record_based $method] == 1 } {
+ puts "Skipping\
+ -revsplitoff option for method $method."
+ continue
+ }
+ }
+ set did [open $dict]
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest$tnum.a: Create and populate database ($splitopt)."
+ set pagesize 512
+ set db [eval {berkdb_open -create -pagesize $pagesize \
+ -mode 0644} $splitopt $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set str [repeat $alphabet 100]
+
+ set ret [eval \
+ {$db put} $txn {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ incr count
+
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ close $did
+ error_check_good db_sync [$db sync] 0
+
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ set filename $testdir/$testfile
+ } else {
+ set filename $testfile
+ }
+ set size1 [file size $filename]
+ set free1 [stat_field $db stat "Pages on freelist"]
+
+ puts "\tTest$tnum.b: Delete most entries from database."
+ set did [open $dict]
+ set count [expr $nentries - 1]
+ set n 57
+
+ # Leave every nth item. Since rrecno renumbers, we
+ # delete starting at nentries and working down to 0.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ while { [gets $did str] != -1 && $count > 0 } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ if { [expr $count % $n] != 0 } {
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good del $ret 0
+ }
+ incr count -1
+ }
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ puts "\tTest$tnum.c: Do a dump_file on contents."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ puts "\tTest$tnum.d: Compact and verify database."
+ for {set commit 0} {$commit <= $txnenv} {incr commit} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval $db compact $txn -freespace]
+ if { $txnenv == 1 } {
+ if { $commit == 0 } {
+ puts "\tTest$tnum.d: Aborting."
+ error_check_good txn_abort [$t abort] 0
+ } else {
+ puts "\tTest$tnum.d: Committing."
+ error_check_good txn_commit [$t commit] 0
+ }
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good verify_dir \
+ [ verify_dir $testdir "" 0 0 $nodump] 0
+ }
+
+ set size2 [file size $filename]
+ set free2 [stat_field $db stat "Pages on freelist"]
+
+ # Reduction in on-disk size should be substantial.
+#### We should look at the partitioned files #####
+if { [is_partitioned $args] == 0 } {
+ set reduction .80
+ error_check_good \
+ file_size [expr [expr $size1 * $reduction] > $size2] 1
+}
+
+ # Pages should be freed for all methods except maybe
+ # record-based non-queue methods. Even with recno, the
+ # number of free pages may not decline.
+ if { [is_record_based $method] == 1 } {
+ error_check_good pages_freed [expr $free2 >= $free1] 1
+ } else {
+ error_check_good pages_freed [expr $free2 > $free1] 1
+ }
+
+ puts "\tTest$tnum.e: Contents are the same after compaction."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t2
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ error_check_good filecmp [filecmp $t1 $t2] 0
+
+ puts "\tTest$tnum.f: Add more entries to database."
+ # Use integers as keys instead of strings, just to mix it up
+ # a little.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 1 } { $i < $nentries } { incr i } {
+ set key $i
+ set str [repeat $alphabet 100]
+ set ret [eval \
+ {$db put} $txn {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ set size3 [file size $filename]
+ set free3 [stat_field $db stat "Pages on freelist"]
+
+ puts "\tTest$tnum.g: Remove more entries, this time by cursor."
+ set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+
+ # Leave every nth item.
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 }\
+ { set dbt [$dbc get -next] ; incr count } {
+ if { [expr $count % $n] != 0 } {
+ error_check_good dbc_del [$dbc del] 0
+ }
+ }
+
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ puts "\tTest$tnum.h: Save contents."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+
+ puts "\tTest$tnum.i: Compact and verify database again."
+ for {set commit 0} {$commit <= $txnenv} {incr commit} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval $db compact $txn -freespace]
+ if { $txnenv == 1 } {
+ if { $commit == 0 } {
+					puts "\tTest$tnum.i: Aborting."
+ error_check_good txn_abort [$t abort] 0
+ } else {
+					puts "\tTest$tnum.i: Committing."
+ error_check_good txn_commit [$t commit] 0
+ }
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good verify_dir \
+ [ verify_dir $testdir "" 0 0 $nodump] 0
+ }
+
+ set size4 [file size $filename]
+ set free4 [stat_field $db stat "Pages on freelist"]
+
+#### We should look at the partitioned files #####
+if { [is_partitioned $args] == 0 } {
+ error_check_good \
+ file_size [expr [expr $size3 * $reduction] > $size4] 1
+#### We are specifying -freespace, so why should there be more pages on the free list? #######
+ if { [is_record_based $method] == 1 } {
+ error_check_good pages_freed [expr $free4 >= $free3] 1
+ } else {
+ error_check_good pages_freed [expr $free4 > $free3] 1
+ }
+}
+
+ puts "\tTest$tnum.j: Contents are the same after compaction."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t2
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good filecmp [filecmp $t1 $t2] 0
+
+ error_check_good db_close [$db close] 0
+ close $did
+ }
+}
diff --git a/db-4.8.30/test/test115.tcl b/db-4.8.30/test/test115.tcl
new file mode 100644
index 0000000..086a5a0
--- /dev/null
+++ b/db-4.8.30/test/test115.tcl
@@ -0,0 +1,362 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test115
+# TEST Test database compaction with user-specified btree sort.
+# TEST
+# TEST This is essentially test111 with the user-specified sort.
+# TEST Populate a database. Remove a high proportion of entries.
+# TEST Dump and save contents. Compact the database, dump again,
+# TEST and make sure we still have the same contents.
+# TEST Add back some entries, delete more entries (this time by
+# TEST cursor), dump, compact, and do the before/after check again.
+
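+# A minimal sketch (file name is a placeholder) of the pattern used
+# below: open a btree with a user-specified comparator, then hand the
+# same comparator to verification so the sort order can be checked.
+proc test115_btcompare_sketch { testfile } {
+	set db [eval {berkdb_open -create -btree \
+	    -btcompare test093_cmp1 -mode 0644} $testfile]
+	error_check_good sketch_open [is_valid_db $db] TRUE
+	error_check_good sketch_close [$db close] 0
+	# dbverify must be given the same comparator as the open.
+	if { [catch {eval {berkdb dbverify \
+	    -btcompare test093_cmp1} {$testfile}} res] } {
+		puts "FAIL: Verification failed with $res"
+	}
+}
+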
+proc test115 { method {nentries 10000} {tnum "115"} args } {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+ global encrypt
+ global passwd
+
+ if { [is_btree $method] != 1 } {
+ puts "Skipping test$tnum for method $method."
+ return
+ }
+
+ # If a page size was specified, find out what it is. Pages
+ # might not be freed in the case of really large pages (64K)
+ # but we still want to run this test just to make sure
+ # nothing funny happens.
+ set pagesize 0
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ incr pgindex
+ set pagesize [lindex $args $pgindex]
+ }
+
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set basename $testdir/test$tnum
+ set env NULL
+ set envargs ""
+ } else {
+ set basename test$tnum
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test$tnum: skipping for RPC."
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test$tnum:\
+ ($method $args $encargs) Database compaction with user-specified sort."
+
+ cleanup $testdir $env
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set splitopts { "" "-revsplitoff" }
+ set txn ""
+
+ set checkfunc test093_check
+
+ foreach splitopt $splitopts {
+ set testfile $basename.db
+ if { $splitopt == "-revsplitoff" } {
+ set testfile $basename.rev.db
+ }
+ set did [open $dict]
+
+ puts "\tTest$tnum.a: Create and populate database ($splitopt)."
+ set db [eval {berkdb_open -create -btcompare test093_cmp1 \
+ -mode 0644} $splitopt $args $encargs $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set count 0
+ set btvals {}
+ set btvalsck {}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set str [reverse $str]
+
+ set ret [eval \
+ {$db put} $txn {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ lappend btvals $key
+ incr count
+
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ close $did
+ error_check_good db_sync [$db sync] 0
+
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ set filename $testdir/$testfile
+ } else {
+ set filename $testfile
+ }
+ set size1 [file size $filename]
+ set free1 [stat_field $db stat "Pages on freelist"]
+ set leaf1 [stat_field $db stat "Leaf pages"]
+ set internal1 [stat_field $db stat "Internal pages"]
+
+ puts "\tTest$tnum.b: Delete most entries from database."
+ set did [open $dict]
+ set count [expr $nentries - 1]
+ set n 14
+
+ # Leave every nth item. Since rrecno renumbers, we
+ # delete starting at nentries and working down to 0.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ while { [gets $did str] != -1 && $count > 0 } {
+ set key $str
+
+ if { [expr $count % $n] != 0 } {
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good del $ret 0
+ }
+ incr count -1
+ }
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ close $did
+ error_check_good db_sync [$db sync] 0
+
+ puts "\tTest$tnum.c: Do a dump_file on contents."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ puts "\tTest$tnum.d: Compact and verify database."
+ for {set commit 0} {$commit <= $txnenv} {incr commit} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval $db compact $txn -freespace]
+ if { $txnenv == 1 } {
+ if { $commit == 0 } {
+ puts "\tTest$tnum.d: Aborting."
+ error_check_good txn_abort [$t abort] 0
+ } else {
+ puts "\tTest$tnum.d: Committing."
+ error_check_good txn_commit [$t commit] 0
+ }
+ }
+ error_check_good db_sync [$db sync] 0
+ if { [catch {eval \
+ {berkdb dbverify -btcompare test093_cmp1}\
+ $envargs $encargs {$testfile}} res] } {
+ puts "FAIL: Verification failed with $res"
+ }
+
+ }
+
+ set size2 [file size $filename]
+ set free2 [stat_field $db stat "Pages on freelist"]
+ set leaf2 [stat_field $db stat "Leaf pages"]
+ set internal2 [stat_field $db stat "Internal pages"]
+
+		# The sum of internal pages, leaf pages, and pages on the
+		# free list should decrease after compaction, indicating
+		# that pages have been returned to the file system.
+ set sum1 [expr $free1 + $leaf1 + $internal1]
+ set sum2 [expr $free2 + $leaf2 + $internal2]
+ error_check_good pages_freed [expr $sum1 > $sum2] 1
+
+ # Check for reduction in file size.
+#### We should look at the partitioned files #####
+if { [is_partitioned $args] == 0 } {
+ set reduction .95
+ error_check_good \
+ file_size [expr [expr $size1 * $reduction] > $size2] 1
+}
+ puts "\tTest$tnum.e: Contents are the same after compaction."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t2 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ error_check_good filecmp [filecmp $t1 $t2] 0
+
+ puts "\tTest$tnum.f: Add more entries to database."
+ set did [open $dict]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set str [reverse $str]
+
+ set ret [eval \
+ {$db put} $txn {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ lappend btvals $key
+ incr count
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ close $did
+ error_check_good db_sync [$db sync] 0
+
+ set size3 [file size $filename]
+ set free3 [stat_field $db stat "Pages on freelist"]
+ set leaf3 [stat_field $db stat "Leaf pages"]
+ set internal3 [stat_field $db stat "Internal pages"]
+
+ puts "\tTest$tnum.g: Remove more entries, this time by cursor."
+ set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+
+ # Leave every nth item.
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 }\
+ { set dbt [$dbc get -next] ; incr count } {
+ if { [expr $count % $n] != 0 } {
+ error_check_good dbc_del [$dbc del] 0
+ }
+ }
+
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ puts "\tTest$tnum.h: Save contents."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+
+ puts "\tTest$tnum.i: Compact and verify database again."
+ for {set commit 0} {$commit <= $txnenv} {incr commit} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval $db compact $txn -freespace]
+ if { $txnenv == 1 } {
+ if { $commit == 0 } {
+					puts "\tTest$tnum.i: Aborting."
+ error_check_good txn_abort [$t abort] 0
+ } else {
+					puts "\tTest$tnum.i: Committing."
+ error_check_good txn_commit [$t commit] 0
+ }
+ }
+ error_check_good db_sync [$db sync] 0
+ if { [catch {eval \
+ {berkdb dbverify -btcompare test093_cmp1}\
+ $envargs $encargs {$testfile}} res] } {
+ puts "FAIL: Verification failed with $res"
+ }
+ }
+
+ set size4 [file size $filename]
+ set free4 [stat_field $db stat "Pages on freelist"]
+ set leaf4 [stat_field $db stat "Leaf pages"]
+ set internal4 [stat_field $db stat "Internal pages"]
+
+		# The sum of internal pages, leaf pages, and pages on the
+		# free list should decrease after compaction, indicating
+		# that pages have been returned to the file system.
+ set sum3 [expr $free3 + $leaf3 + $internal3]
+ set sum4 [expr $free4 + $leaf4 + $internal4]
+ error_check_good pages_freed [expr $sum3 > $sum4] 1
+
+ # Check for file size reduction.
+#### We should look at the partitioned files #####
+if { [is_partitioned $args] == 0 } {
+ error_check_good\
+ file_size [expr [expr $size3 * $reduction] > $size4] 1
+}
+
+ puts "\tTest$tnum.j: Contents are the same after compaction."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t2 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good filecmp [filecmp $t1 $t2] 0
+
+ error_check_good db_close [$db close] 0
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ }
+
+ # Clean up so the general verification (without the custom comparator)
+ # doesn't fail.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set env NULL
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+}
diff --git a/db-4.8.30/test/test116.tcl b/db-4.8.30/test/test116.tcl
new file mode 100644
index 0000000..b5cdf3c
--- /dev/null
+++ b/db-4.8.30/test/test116.tcl
@@ -0,0 +1,303 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test116
+# TEST Test of basic functionality of lsn_reset.
+# TEST
+# TEST Create a database in an env. Copy it to a new file within
+# TEST the same env. Reset the page LSNs.
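+
+# A minimal sketch (file names are placeholders) of the reset sequence
+# exercised below: copy the database file within the env home, give the
+# copy a fresh file id, then clear its page LSNs so it can be moved to
+# another environment.
+proc test116_reset_sketch { env home dbfile copy } {
+	# Both database names are relative to the env home, which is
+	# what id_reset and lsn_reset expect.
+	file copy -force $home/$dbfile $home/$copy
+	error_check_good sketch_id_reset [$env id_reset $copy] 0
+	error_check_good sketch_lsn_reset [$env lsn_reset $copy] 0
+}
+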
+proc test116 { method {tnum "116"} args } {
+ source ./include.tcl
+ global util_path
+ global passwd
+
+ set orig_tdir $testdir
+ puts "Test$tnum ($method): Test lsn_reset."
+
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ set testfile A.db
+ set newtag new
+ set newfile $testfile.$newtag
+ set nentries 50
+ set filenames "A B C D E"
+
+ # This test needs two envs. If one is provided, create the
+ # second under it. If no env is provided, create both.
+ set txn ""
+ set txnenv 0
+ set envargs ""
+ set resetargs ""
+ set eindex [lsearch -exact $args "-env"]
+
+ if { $eindex == -1 } {
+ puts "\tTest$tnum.a: Creating env."
+ env_cleanup $testdir
+ set env [eval {berkdb_env} \
+ -create $encargs $envargs -home $testdir -txn]
+ append args " -auto_commit "
+ error_check_good dbenv [is_valid_env $env] TRUE
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "\tTest$tnum.a: Using provided env $env."
+
+ # Make sure the second env we create has all the
+ # same flags the provided env does.
+ if { [is_substr [$env get_open_flags] "-thread"] } {
+ append envargs " -thread "
+ }
+ if { [is_substr $args "-encrypt"] } {
+ append envargs " -encryptaes $passwd "
+ }
+ if { [is_substr [$env get_encrypt_flags] "-encryptaes"] } {
+ append envargs " -encryptaes $passwd "
+ append resetargs " -encrypt "
+ }
+ set txn ""
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ } elseif { $txnenv == 0 } {
+ puts "Skipping Test$tnum for non-transactional env."
+ return
+ }
+ set testdir [get_home $env]
+ }
+
+ foreach lorder { 1234 4321 } {
+ if { $lorder == 1234 } {
+ set pattern "i i"
+ } else {
+ set pattern "I I"
+ }
+
+ # Open database A, populate and close.
+ puts "\tTest$tnum.b: Creating database with lorder $lorder."
+ cleanup $testdir $env
+
+ # Create a second directory, and create an env there.
+ set testdir [get_home $env]
+ set newdir $testdir/NEWDIR
+ file mkdir $newdir
+ set newenv [eval {berkdb_env} \
+ -create $encargs $envargs -home $newdir -txn]
+ error_check_good newenv [is_valid_env $newenv] TRUE
+
+		# We test with subdatabases except with the queue access
+		# method and partitioned databases, where they are not
+		# allowed.
+ if { [is_queue $method] == 1 || [is_partitioned $args] == 1} {
+ set db [eval {berkdb_open} -env $env -lorder $lorder \
+ $omethod $args -create -mode 0644 $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set pgsize [stat_field $db stat "Page size"]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ set key $i
+ set data DATA.$i
+ error_check_good db_put [eval {$db put} \
+ $txn $key [chop_data $method $data]] 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ } else {
+ foreach filename $filenames {
+ set db [eval {berkdb_open} -env $env \
+ -lorder $lorder $omethod $args -create \
+ -mode 0644 $testfile $filename]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set pgsize [stat_field $db stat "Page size"]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good \
+ txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ set key $i
+ set data DATA.$i
+ error_check_good \
+ db_put [eval {$db put} $txn \
+ $key [chop_data $method $data]] 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+ }
+
+ # Copy database file A. Reset LSNs on the copy. Then
+ # test that the copy is usable both in its native env
+ # and in a new env.
+
+ puts "\tTest$tnum.c: Copy database and reset its LSNs."
+ set testdir [get_home $env]
+ set newdir [get_home $newenv]
+
+ # Reset LSNs before copying. We do a little dance here:
+ # first copy the file within the same directory, then reset
+ # the fileid on the copy, then reset the LSNs on the copy,
+ # and only then copy the new file to the new env. Otherwise
+ # the LSNs would get reset on the original file.
+
+ file copy -force $testdir/$testfile $testdir/$newfile
+		# If we're using queue extents or partitions, we must
+		# copy the extents/partitions to the new file name as well.
+ set extents ""
+ if { [is_queueext $method] || [is_partitioned $args]} {
+ copy_extent_file $testdir $testfile $newtag
+ }
+ error_check_good fileid_reset [$env id_reset $newfile] 0
+ error_check_good \
+ lsn_reset [eval {$env lsn_reset} $resetargs {$newfile}] 0
+
+ file copy -force $testdir/$newfile $newdir/$testfile
+
+		# If we're using queue extents or partitions, we must copy
+		# the extents/partitions to the new directory as well.
+ if { [is_queueext $method] || [is_partitioned $args]} {
+ set extents [get_extfiles $testdir $newfile ""]
+ foreach extent $extents {
+ set nextent [make_ext_filename \
+ $testdir/NEWDIR $testfile $extent]
+ file copy -force $extent $nextent
+ }
+ }
+
+ # Get the LSNs and check them.
+ set npages [getlsns \
+ $testdir $testfile $extents $pgsize orig_lsns]
+ set newpages [getlsns \
+ $testdir $newfile $extents $pgsize new_lsns]
+ set newdirpages [getlsns \
+ $newdir $testfile $extents $pgsize newdir_lsns]
+ error_check_good newpages_match $npages $newpages
+ error_check_good newdirpages_match $npages $newdirpages
+ for { set i 0 } { $i < $npages } { incr i } {
+ error_check_binary \
+ new_lsns [binary format $pattern 0 1] $new_lsns($i)
+ error_check_binary \
+ newdirlsns_match \
+ [binary format $pattern 0 1] $newdir_lsns($i)
+ }
+
+ if { [ is_partitioned $args] } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+ puts "\tTest$tnum.d: Verify directories with reset LSNs."
+ error_check_good \
+ verify [verify_dir $testdir "\tTest$tnum.d: " 0 0 $nodump] 0
+ error_check_good \
+ verify [verify_dir $newdir "\tTest$tnum.e: " 0 0 $nodump] 0
+
+ puts "\tTest$tnum.f: Open new db, check data, close db."
+ if { [is_queue $method] == 1 || [is_partitioned $args] == 1 } {
+ set db [eval {berkdb_open} -env $newenv \
+ -lorder $lorder \
+ $omethod $args -create -mode 0644 $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$newenv txn]
+ error_check_good txn [is_valid_txn $t $newenv] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ set key $i
+ set ret [eval {$db get} $txn $key]
+ error_check_good db_get \
+ [lindex [lindex $ret 0] 1] \
+ [pad_data $method DATA.$i]
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ } else {
+ foreach filename $filenames {
+ set db [eval {berkdb_open} -env $newenv \
+ -lorder $lorder $omethod $args \
+ -create -mode 0644 $testfile $filename ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$newenv txn]
+ error_check_good \
+ txn [is_valid_txn $t $newenv] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 1 } { $i <= $nentries } { incr i } {
+ set key $i
+ set ret [eval {$db get} $txn $key]
+ error_check_good db_get \
+ [lindex [lindex $ret 0] 1] \
+ [pad_data $method DATA.$i]
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+ }
+ error_check_good newfile_rm [$env dbremove $newfile] 0
+ error_check_good newenv_close [$newenv close] 0
+ fileremove -f $newdir
+ }
+
+ set testdir $orig_tdir
+ # Close the parent env if this test created it.
+ if { $eindex == -1 } {
+ error_check_good env_close [$env close] 0
+ }
+}
+
+proc getlsns { testdir dbfile extents pgsize lsns } {
+ upvar $lsns file_lsns
+ set fid [open $testdir/$dbfile r]
+ fconfigure $fid -translation binary
+ set eof 0
+ set pg 0
+ while { $eof == 0 } {
+ set offset [expr $pg * $pgsize]
+ seek $fid $offset start
+ set file_lsns($pg) [read $fid 8]
+ set eof [eof $fid]
+ incr pg
+ }
+ close $fid
+ incr pg -1
+ foreach extent $extents {
+ set ep [getlsns $testdir \
+ [make_ext_filename "." $dbfile $extent] \
+ {} $pgsize elsns]
+ for {set i 0} {$i < $ep} {incr i} {
+ set file_lsns($pg) $elsns($i)
+ incr pg
+ }
+ }
+ return $pg
+}
+
+proc error_check_binary {func desired result} {
+ if { [binary_compare $desired $result] != 0 } {
+ flush stdout
+ flush stderr
+ binary scan $desired h16 d
+ binary scan $result h16 r
+ error "FAIL:[timestamp]\
+ $func: expected $d, got $r"
+ }
+}
diff --git a/db-4.8.30/test/test117.tcl b/db-4.8.30/test/test117.tcl
new file mode 100644
index 0000000..1537159
--- /dev/null
+++ b/db-4.8.30/test/test117.tcl
@@ -0,0 +1,205 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test117
+# TEST Test database compaction with requested fill percent.
+# TEST
+# TEST Populate a database. Remove a high proportion of entries.
+# TEST Dump and save contents. Compact the database, requesting
+# TEST fill percentages starting at 10% and working our way up to
+# TEST 100. On each cycle, make sure we still have the same contents.
+# TEST
+# TEST Unlike the other compaction tests, this one does not
+# TEST use -freespace.
+
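+# A minimal sketch of a fill-percent sweep like the one below; the
+# helper name is illustrative. Without -freespace the compaction
+# repacks pages toward the requested fill factor but does not ask
+# for pages to be returned to the file system.
+proc test117_fillpercent_sketch { db } {
+	for { set fillpercent 10 } { $fillpercent <= 100 } \
+	    { incr fillpercent 10 } {
+		$db compact -fillpercent $fillpercent
+		error_check_good sketch_sync [$db sync] 0
+	}
+}
+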
+proc test117 { method {nentries 10000} {tnum "117"} args } {
+ source ./include.tcl
+
+ # Compaction is an option for btree and recno databases only.
+ if { [is_hash $method] == 1 || [is_queue $method] == 1 } {
+ puts "Skipping test$tnum for method $method."
+ return
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_partition_callback $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set basename $testdir/test$tnum
+ set env NULL
+ } else {
+ set basename test$tnum
+ incr eindex
+ set env [lindex $args $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test$tnum: ($method $args) Database compaction and fillpercent."
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set splitopts { "" "-revsplitoff" }
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ } else {
+ set checkfunc test001.check
+ }
+
+ foreach splitopt $splitopts {
+ set testfile $basename.db
+ if { $splitopt == "-revsplitoff" } {
+ set testfile $basename.rev.db
+ if { [is_record_based $method] == 1 } {
+ puts "Skipping\
+ -revsplitoff option for method $method."
+ continue
+ }
+ }
+ set did [open $dict]
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest$tnum.a: Create and populate database ($splitopt)."
+ set db [eval {berkdb_open -create \
+ -mode 0644} $splitopt $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ global kvals
+
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+
+ set ret [eval \
+ {$db put} $txn {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ incr count
+
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ close $did
+ error_check_good db_sync [$db sync] 0
+
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ set filename $testdir/$testfile
+ } else {
+ set filename $testfile
+ }
+ set size1 [file size $filename]
+ set free1 [stat_field $db stat "Pages on freelist"]
+
+ puts "\tTest$tnum.b: Delete most entries from database."
+ set did [open $dict]
+ set count [expr $nentries - 1]
+ set n 17
+
+ # Leave every nth item. Since rrecno renumbers, we
+ # delete starting at nentries and working down to 0.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ while { [gets $did str] != -1 && $count > 0 } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ if { [expr $count % $n] != 0 } {
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good del $ret 0
+ }
+ incr count -1
+ }
+ if { $txnenv == 1 } {
+ error_check_good t_commit [$t commit] 0
+ }
+ error_check_good db_sync [$db sync] 0
+
+ puts "\tTest$tnum.c: Do a dump_file on contents."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ # Start by compacting pages filled less than 10% and
+ # work up to 100%.
+ for { set fillpercent 10 } { $fillpercent <= 100 }\
+ { incr fillpercent 10 } {
+
+ puts "\tTest$tnum.d: Compact and verify database\
+ with fillpercent $fillpercent."
+ set ret [$db compact -fillpercent $fillpercent]
+ error_check_good db_sync [$db sync] 0
+ set size2 [file size $filename]
+ error_check_good verify_dir \
+ [verify_dir $testdir "" 0 0 $nodump] 0
+ set free2 [stat_field $db stat "Pages on freelist"]
+
+ # The number of free pages should never decline.
+ error_check_good pages_freed [expr $free2 >= $free1] 1
+ error_check_good file_size [expr $size2 <= $size1] 1
+
+ puts "\tTest$tnum.e:\
+ Contents are the same after compaction."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t2
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+ error_check_good filecmp [filecmp $t1 $t2] 0
+ set free1 $free2
+ set size1 $size2
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+ }
+}
diff --git a/db-4.8.30/test/test119.tcl b/db-4.8.30/test/test119.tcl
new file mode 100644
index 0000000..8ab5154
--- /dev/null
+++ b/db-4.8.30/test/test119.tcl
@@ -0,0 +1,258 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test119
+# TEST Test behavior when Berkeley DB returns DB_BUFFER_SMALL on a cursor.
+# TEST
+# TEST If the user-supplied buffer is not large enough to contain
+# TEST the returned value, DB returns DB_BUFFER_SMALL. When that
+# TEST happens, check that the cursor does not move -- if it moves,
+# TEST it will skip items. [#13815]
+
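+# A minimal sketch (names are illustrative) of handling DB_BUFFER_SMALL
+# on a bounded get, as exercised below: a get whose -data_buf_size is
+# smaller than the stored data fails, and the caller retries with a
+# larger buffer.
+proc test119_bufsmall_sketch { db key len } {
+	if { [catch {$db get -data_buf_size $len $key} res] } {
+		# Expect DB_BUFFER_SMALL, then retry with more room.
+		error_check_good sketch_err \
+		    [is_substr $res DB_BUFFER_SMALL] 1
+		set res [$db get -data_buf_size [expr $len * 4] $key]
+	}
+	return $res
+}
+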
+proc test119 { method {tnum "119"} args} {
+ source ./include.tcl
+ global alphabet
+ global errorCode
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ puts "Test$tnum: $method ($args) Test of DB_BUFFER_SMALL."
+
+ # Skip for queue; it has fixed-length records, so overflowing
+ # the buffer isn't possible with an ordinary get.
+ if { [is_queue $method] == 1 } {
+ puts "Skipping test$tnum for method $method"
+ return
+ }
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ set txn ""
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ cleanup $testdir $env
+
+ puts "\tTest$tnum.a: Set up database."
+ set db [eval \
+ {berkdb_open_noerr -create -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Test -data_buf_size with db->get.
+ puts "\tTest$tnum.b: Test db get with -data_buf_size."
+ set datalength 20
+ set data [repeat "a" $datalength]
+ set key 1
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+
+ error_check_good db_put \
+ [eval {$db put} $txn {$key [chop_data $method $data]}] 0
+
+ # A get with data_buf_size equal to the data size should work.
+ set ret [eval {$db get} $txn -data_buf_size $datalength $key]
+ error_check_good db_get_key [lindex [lindex $ret 0] 0] $key
+ error_check_good db_get_data [lindex [lindex $ret 0] 1] $data
+
+ # A get with a data_buf_size decreased by one should fail.
+ catch {eval {$db get}\
+ $txn -data_buf_size [expr $datalength - 1] $key} res
+ error_check_good buffer_small_error [is_substr $res DB_BUFFER_SMALL] 1
+
+ # Delete the item so it won't get in the way of the cursor test.
+ error_check_good db_del [eval {$db del} $txn $key] 0
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ # Test -data_buf_size and -key_buf_size with dbc->get.
+ #
+ # Set up a database that includes large and small keys and
+ # large and small data in various combinations.
+ #
+ # Create small buffer equal to the largest page size. This will
+ # get DB_BUFFER_SMALL errors.
+ # Create big buffer large enough to never get DB_BUFFER_SMALL
+ # errors with this data set.
+
+ puts "\tTest$tnum.c:\
+ Test cursor get with -data_buf_size and -key_buf_size."
+ set key $alphabet
+ set data $alphabet
+ set nentries 100
+ set start 100
+ set bigkey [repeat $key 8192]
+ set bigdata [repeat $data 8192]
+ set buffer [expr 64 * 1024]
+ set bigbuf [expr $buffer * 8]
+
+ puts "\tTest$tnum.c1: Populate database."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+
+ # Put in a big key every X data items, and big data every
+ # Y data items. X and Y should be small enough that we
+	# hit the case where both the key and the data are big.
+ set x 5
+ set y 7
+ for { set i $start } { $i < [expr $nentries + $start] } { incr i } {
+ # If we have a record-based method, we can't have big keys.
+ # Just use the count.
+ if { [is_record_based $method] == 1 } {
+ set k $i
+ } else {
+ if { [expr $i % $x] == 1 } {
+ set k $i.$bigkey
+ } else {
+ set k $i.$key
+ }
+ }
+
+ # We can have big data on any method.
+ if { [expr $i % $y] == 1 } {
+ set d $i.$bigdata
+ } else {
+ set d $i.$data
+ }
+ error_check_good db_put \
+ [eval {$db put} $txn {$k [chop_data $method $d]}] 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn_commit [$t commit] 0
+ }
+
+ # Walk the database with a cursor. When we hit DB_BUFFER_SMALL,
+ # make sure DB returns the appropriate key/data pair.
+ puts "\tTest$tnum.c2: Walk the database with a cursor."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good cursor [is_valid_cursor $curs $db] TRUE
+
+ # Since hash is not sorted, we'll test that no items are
+ # skipped by keeping a list of all items retrieved, and
+ # making sure it is complete and that each item is unique
+ # at the end of the test.
+ set hashitems {}
+
+ set count $start
+ for { set kd [catch {eval $curs get \
+ -key_buf_size $buffer -data_buf_size $buffer -first} res] } \
+ { $count < [expr $nentries + $start] } \
+ { set kd [catch {eval $curs get \
+ -key_buf_size $buffer -data_buf_size $buffer -next} res] } {
+ if { $kd == 1 } {
+ # Make sure we have the expected error.
+ error_check_good buffer_small_error \
+ [is_substr $errorCode DB_BUFFER_SMALL] 1
+
+ # Adjust the buffer sizes to fit the big key or data.
+ if { [expr $count % $x] == 1 } {
+ set key_buf $bigbuf
+ } else {
+ set key_buf $buffer
+ }
+ if { [expr $count % $y] == 1 } {
+ set data_buf $bigbuf
+ } else {
+ set data_buf $buffer
+ }
+
+ # Hash is not sorted, so just make sure we can get
+ # the item with a large buffer and check it later.
+ # Likewise for partition callback.
+ if { [is_hash $method] == 1 || \
+ [is_partition_callback $args] == 1} {
+ set data_buf $bigbuf
+ set key_buf $bigbuf
+ }
+
+ # Retrieve with big buffer; there should be no error.
+ # This also walks the cursor forward.
+ set nextbig [catch {eval $curs get -key_buf_size \
+ $key_buf -data_buf_size $data_buf -next} res]
+ error_check_good data_big_buffer_get $nextbig 0
+
+ # Extract the item number.
+ set key [lindex [lindex $res 0] 0]
+ set data [lindex [lindex $res 0] 1]
+ if { [string first . $key] != -1 } {
+ set keyindex [string first . $key]
+ set keynumber \
+ [string range $key 0 [expr $keyindex - 1]]
+ } else {
+ set keynumber $key
+ }
+ set dataindex [string first . $data]
+ set datanumber \
+ [string range $data 0 [expr $dataindex - 1]]
+
+ # If not hash, check that item number is correct.
+ # If hash, save the number for later verification.
+ if { [is_hash $method] == 0 \
+ && [is_partition_callback $args] == 0 } {
+ error_check_good key_number $keynumber $count
+ error_check_good data_number $datanumber $count
+ } else {
+ lappend hashitems $keynumber
+ }
+ } else {
+ # For hash, save the item numbers of all items
+ # retrieved, not just those returning DB_BUFFER_SMALL.
+ if { [is_hash $method] == 1 || \
+ [is_partition_callback $args] == 1} {
+ set key [lindex [lindex $res 0] 0]
+ set keyindex [string first . $key]
+ set keynumber \
+ [string range $key 0 [expr $keyindex - 1]]
+ lappend hashitems $keynumber
+ }
+ }
+ incr count
+ set errorCode NONE
+ }
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now check the list of items retrieved from hash.
+ if { [is_hash $method] == 1 || \
+ [is_partition_callback $args] == 1} {
+ set sortedhashitems [lsort $hashitems]
+ for { set i $start } \
+ { $i < [expr $nentries + $start] } { incr i } {
+ set hashitem \
+ [lindex $sortedhashitems [expr $i - $start]]
+ error_check_good hash_check $hashitem $i
+ }
+ }
+ error_check_good db_close [$db close] 0
+}
+
diff --git a/db-4.8.30/test/test120.tcl b/db-4.8.30/test/test120.tcl
new file mode 100644
index 0000000..f9694a3
--- /dev/null
+++ b/db-4.8.30/test/test120.tcl
@@ -0,0 +1,98 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2006-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test120
+# TEST Test of multi-version concurrency control.
+# TEST
+# TEST Test basic functionality: a snapshot transaction started
+# TEST before a regular transaction's put can't see the modification.
+# TEST A snapshot transaction started after the put can see it.
+
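+# A minimal sketch (handles and key are placeholders) of the snapshot
+# rule tested below: a -snapshot txn sees only data that was committed
+# before the txn began.
+proc test120_snapshot_sketch { env db key } {
+	set writer [$env txn]
+	set reader [$env txn -snapshot]
+	error_check_good sketch_put [$db put -txn $writer $key DATA] 0
+	error_check_good sketch_commit [$writer commit] 0
+	# The reader began before the put committed, so it sees nothing.
+	error_check_good sketch_get \
+	    [llength [$db get -txn $reader $key]] 0
+	error_check_good sketch_reader_commit [$reader commit] 0
+}
+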
+proc test120 { method {tnum "120"} args } {
+ source ./include.tcl
+
+ # This test needs its own env.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test$tnum skipping for env $env"
+ return
+ }
+
+ # MVCC is not allowed with queue methods.
+ if { [is_queue $method] == 1 } {
+ puts "Test$tnum skipping for method $method"
+ return
+ }
+
+	puts "\tTest$tnum ($method): MVCC and snapshot visibility."
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set pageargs ""
+ split_pageargs $args pageargs
+ set filename "test.db"
+
+ # Create transactional env. Specifying -multiversion makes
+ # all databases opened within the env -multiversion.
+ env_cleanup $testdir
+ puts "\tTest$tnum.a: Creating txn env."
+ set env [eval {berkdb_env}\
+ -create -txn -multiversion $pageargs $encargs -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open database.
+ puts "\tTest$tnum.b: Creating -multiversion db."
+ set db [eval {berkdb_open} \
+ -create -auto_commit -env $env $omethod $args $filename]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest$tnum.c: Start transactions."
+ # Start two transactions. T1 is the writer, so it's a regular
+ # transaction. T2 is the reader and uses -snapshot.
+ set t1 [$env txn]
+ set txn1 "-txn $t1"
+ set t2 [$env txn -snapshot]
+ set txn2 "-txn $t2"
+
+ # Enter some data using txn1.
+ set key 1
+ set data DATA
+ error_check_good \
+ t1_put [eval {$db put} $txn1 $key [chop_data $method $data]] 0
+
+ # Txn2 cannot see txn1's put, but it does not block.
+ puts "\tTest$tnum.d: Txn2 can't see txn1's put."
+ set ret [eval {$db get} $txn2 $key]
+ error_check_good txn2_get [llength $ret] 0
+
+ # Commit txn1. Txn2 get still can't see txn1's put.
+ error_check_good t1_commit [$t1 commit] 0
+ set ret [eval {$db get} $txn2 $key]
+ error_check_good txn2_get [llength $ret] 0
+ error_check_good db_sync [$db sync] 0
+ set ret [eval {$db get} $txn2 $key]
+ error_check_good txn2_get [llength $ret] 0
+
+ # Start a new txn with -snapshot. It can see the put.
+ puts "\tTest$tnum.e: A new txn can see txn1's put."
+ set t3 [$env txn -snapshot]
+ set txn3 "-txn $t3"
+ set ret [eval {$db get} $txn3 $key]
+ error_check_good \
+ t3_get $ret [list [list $key [pad_data $method $data]]]
+
+ # Commit txns.
+ error_check_good t2_commit [$t2 commit] 0
+ error_check_good t3_commit [$t3 commit] 0
+
+ # Clean up.
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/db-4.8.30/test/test121.tcl b/db-4.8.30/test/test121.tcl
new file mode 100644
index 0000000..868eb44
--- /dev/null
+++ b/db-4.8.30/test/test121.tcl
@@ -0,0 +1,125 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2006-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test121
+# TEST Tests of multi-version concurrency control.
+# TEST
+# TEST MVCC and cursor adjustment.
+# TEST Set up a -snapshot cursor and position it in the middle
+# TEST of a database.
+# TEST Write to the database, both before and after the cursor,
+# TEST and verify that it stays on the same position.
+
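+# A minimal sketch (handle names are placeholders) of the property
+# tested below: a cursor in a -snapshot txn keeps returning the same
+# record from -current while other transactions write around it.
+proc test121_cursor_sketch { env db } {
+	set t [$env txn -snapshot]
+	set dbc [$db cursor -txn $t]
+	set before [$dbc get -first]
+	# Concurrent writes must not move this cursor.
+	set after [$dbc get -current]
+	error_check_good sketch_stable $after $before
+	error_check_good sketch_close [$dbc close] 0
+	error_check_good sketch_commit [$t commit] 0
+}
+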
+proc test121 { method {tnum "121"} args } {
+ source ./include.tcl
+
+ # This test needs its own env.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test$tnum skipping for env $env"
+ return
+ }
+
+ # MVCC is not allowed with queue methods.
+ if { [is_queue $method] == 1 } {
+ puts "Test$tnum skipping for method $method"
+ return
+ }
+
+ puts "\tTest$tnum ($method): MVCC and cursor adjustment."
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set filename "test.db"
+ set pageargs ""
+ set args [split_pageargs $args pageargs]
+
+ # Create transactional env. Specifying -multiversion makes
+ # all databases opened within the env -multiversion.
+
+ env_cleanup $testdir
+ puts "\tTest$tnum.a: Creating txn env."
+
+ # Raise cachesize so this test focuses on cursor adjustment
+ # and not on small cache issues.
+ set cachesize [expr 2 * 1024 * 1024]
+ set max_locks 2000
+ set max_objects 2000
+ set env [eval {berkdb_env -create -cachesize "0 $cachesize 1"}\
+ -lock_max_locks $max_locks -lock_max_objects $max_objects\
+ -txn -multiversion $encargs $pageargs -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open database.
+ puts "\tTest$tnum.b: Creating -multiversion db."
+ set db [eval {berkdb_open} \
+ -create -auto_commit -env $env $omethod $args $pageargs $filename]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Start transactions.
+ puts "\tTest$tnum.c: Start txns with -snapshot."
+ set t1 [$env txn -snapshot]
+ set txn1 "-txn $t1"
+
+	# Enter some data using txn1. Leave holes by using only the
+	# even keys 2, 4, 6 ....
+ set niter 10000
+ set data DATA
+ for { set i 1 } { $i <= $niter } { incr i } {
+ set key [expr $i * 2]
+ error_check_good t1_put [eval {$db put} $txn1 $key $data.$key] 0
+ }
+ error_check_good t1_commit [$t1 commit] 0
+
+ # Open a read-only cursor.
+ set t2 [$env txn -snapshot]
+ set txn2 "-txn $t2"
+ set cursor [eval {$db cursor} $txn2]
+ error_check_good db_cursor [is_valid_cursor $cursor $db] TRUE
+
+ # Walk the cursor halfway through the database.
+ set i 1
+ set halfway [expr $niter / 2]
+ for { set ret [$cursor get -first] } \
+ { $i <= $halfway } \
+ { set ret [$cursor get -next] } {
+ incr i
+ }
+
+ set currentkey [lindex [lindex $ret 0] 0]
+ set currentdata [lindex [lindex $ret 0] 1]
+
+ # Start a new transaction and use it to enter more data.
+ # Verify that the cursor is not changed.
+	puts "\tTest$tnum.d: Enter more data."
+ set t1 [$env txn -snapshot]
+ set txn1 "-txn $t1"
+
+ # Enter more data, filling in the holes from the first
+ # time around by using keys 1, 3, 5 .... Cursor should
+ # stay on the same item.
+ for { set i 1 } { $i <= $niter } { incr i } {
+ set key [expr [expr $i * 2] - 1]
+ error_check_good t1_put [eval {$db put} $txn1 $key $data.$key] 0
+ set ret [$cursor get -current]
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good current_key $k $currentkey
+ error_check_good current_data $d $currentdata
+ }
+
+ error_check_good t1_commit [$t1 commit] 0
+ error_check_good cursor_close [$cursor close] 0
+ error_check_good t2_commit [$t2 commit] 0
+
+ # Clean up.
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/db-4.8.30/test/test122.tcl b/db-4.8.30/test/test122.tcl
new file mode 100644
index 0000000..b2577e9
--- /dev/null
+++ b/db-4.8.30/test/test122.tcl
@@ -0,0 +1,103 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2006-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test122
+# TEST Tests of multi-version concurrency control.
+# TEST
+# TEST MVCC and databases that turn multi-version on and off.
+
+proc test122 { method {tnum "122"} args } {
+ source ./include.tcl
+
+ # This test needs its own env.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test$tnum skipping for env $env"
+ return
+ }
+
+ # MVCC is not allowed with queue methods.
+ if { [is_queue $method] == 1 } {
+ puts "Test$tnum skipping for method $method"
+ return
+ }
+
+ puts "\tTest$tnum ($method): Turning MVCC on and off."
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set pageargs ""
+ split_pageargs $args pageargs
+ set filename "test.db"
+
+ # Create transactional env. Don't specify -multiversion to
+ # the env, because we need to turn it on and off.
+ env_cleanup $testdir
+
+ puts "\tTest$tnum.a: Creating txn env."
+ set cacheargs " -cachesize {0 524288 1} "
+ set env [eval {berkdb_env}\
+ -create $cacheargs -txn $pageargs $encargs -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open database.
+ puts "\tTest$tnum.b: Creating -multiversion db."
+ set db [eval {berkdb_open} -multiversion \
+ -create -auto_commit -env $env $omethod $args $filename]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Put some data. The tcl interface automatically does it
+ # transactionally.
+ set niter 100
+ for { set i 1 } { $i < $niter } { incr i } {
+ set key $i
+ set data DATA.$i
+ error_check_good db_put [eval {$db put} $key $data] 0
+ }
+
+ # Open a read-only handle and also a txn -snapshot handle.
+ puts "\tTest$tnum.c: Open read-only handle and txn -snapshot handle."
+ set t [$env txn -snapshot]
+ set txn "-txn $t"
+ set snapshotdb [eval {berkdb_open} \
+ $txn -env $env $omethod $args $filename]
+ error_check_good snapshotdb [is_valid_db $snapshotdb] TRUE
+ set readonlydb [eval {berkdb_open} \
+ -auto_commit -env $env $omethod $args $filename]
+ error_check_good readonlydb [is_valid_db $readonlydb] TRUE
+
+
+ # Overwrite all the data. The read-only handle will see the
+ # new data and the -snapshot handle will see the old data.
+ puts "\tTest$tnum.d: Overwrite data."
+ for { set i 1 } { $i < $niter } { incr i } {
+ set key $i
+ set data NEWDATA.$i
+ error_check_good db_put [eval {$db put} $key $data] 0
+ }
+
+ puts "\tTest$tnum.e: Check data through handles."
+	for { set i 1 } { $i < $niter } { incr i } {
+		set r_ret [eval {$readonlydb get} $i]
+		set s_ret [eval {$snapshotdb get} $txn $i]
+		set r_key [lindex [lindex $r_ret 0] 0]
+		set r_data [lindex [lindex $r_ret 0] 1]
+		set s_key [lindex [lindex $s_ret 0] 0]
+		set s_data [lindex [lindex $s_ret 0] 1]
+		# The read-only handle sees the overwritten data, while
+		# the snapshot handle still sees the original data.
+		error_check_good r_data [is_substr $r_data NEWDATA] 1
+		error_check_good s_data [is_substr $s_data NEWDATA] 0
+	}
+
+ error_check_good t_commit [$t commit] 0
+
+ # Clean up.
+ error_check_good db_close [$db close] 0
+ error_check_good snapshotdb_close [$snapshotdb close] 0
+ error_check_good readonlydb_close [$readonlydb close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/db-4.8.30/test/test123.tcl b/db-4.8.30/test/test123.tcl
new file mode 100644
index 0000000..78acbf7
--- /dev/null
+++ b/db-4.8.30/test/test123.tcl
@@ -0,0 +1,80 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test123
+# TEST Concurrent Data Store cdsgroup smoke test.
+# TEST
+# TEST Open a CDS env with -cdb_alldb.
+# TEST Start a "txn" with -cdsgroup.
+# TEST Create two databases in the env, do a cursor put
+# TEST in both within the same txn. This should succeed.
+
+proc test123 { method args } {
+ source ./include.tcl
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Skipping test123 for env $env"
+ return
+ }
+
+ if { [is_queue $method] == 1 } {
+ puts "Skipping test123 for method $method"
+ return
+ }
+ if { [is_partitioned $args] == 1 } {
+ puts "Test123 skipping for partitioned $method"
+ return
+ }
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+ set pageargs ""
+ split_pageargs $args pageargs
+ set dbname test123.db
+ set tnum "123"
+
+ puts "Test$tnum: CDB with cdsgroup ($method)"
+ env_cleanup $testdir
+
+ # Open environment and start cdsgroup "transaction".
+ puts "\tTest$tnum.a: Open env."
+ set env [eval {berkdb_env -create} \
+ $pageargs $encargs -cdb -cdb_alldb -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ set txn [$env cdsgroup]
+
+ # Env is created, now set up 2 databases
+ puts "\tTest$tnum.b: Open first database."
+ set db1 [eval {berkdb_open}\
+ -create -env $env $args $omethod -txn $txn $dbname "A"]
+ puts "\tTest$tnum.b1: Open cursor."
+ set curs1 [eval {$db1 cursor} -update -txn $txn]
+ puts "\tTest$tnum.b2: Initialize cursor and do a put."
+ error_check_good curs1_put [eval {$curs1 put} -keyfirst 1 DATA1] 0
+
+ puts "\tTest$tnum.c: Open second database."
+ set db2 [eval {berkdb_open}\
+ -create -env $env $args $omethod -txn $txn $dbname "B"]
+ puts "\tTest$tnum.c1: Open cursor."
+ set curs2 [eval {$db2 cursor} -update -txn $txn]
+	puts "\tTest$tnum.c2: Initialize cursor and do a put."
+ error_check_good curs2_put [eval {$curs2 put} -keyfirst 2 DATA2] 0
+
+ # Clean up.
+ $curs2 close
+ $curs1 close
+ $txn commit
+ $db2 close
+ $db1 close
+ $env close
+
+}
+
diff --git a/db-4.8.30/test/test125.tcl b/db-4.8.30/test/test125.tcl
new file mode 100644
index 0000000..b4f0922
--- /dev/null
+++ b/db-4.8.30/test/test125.tcl
@@ -0,0 +1,205 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2009-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST test125
+# TEST Test cursor comparison API.
+# TEST
+# TEST The cursor comparison API reports whether two cursors within
+# TEST the same database are at the same position. It does not report
+# TEST any information about relative position.
+# TEST
+# TEST 1. Test two uninitialized cursors (error).
+# TEST 2. Test one uninitialized cursor, one initialized (error).
+# TEST 3. Test two cursors in different databases (error).
+# TEST 4. Put two cursors in the same place, test for match. Walk
+# TEST	them back and forth a bit, verifying matches along the way.
+# TEST 5. Two cursors in the same spot. Delete through one.
+
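+# A minimal usage sketch of the comparison call exercised below, assuming
+# $db is an open database handle (illustrative only, not executed):
+#
+#	set c1 [$db cursor]
+#	set c2 [$db cursor]
+#	$c1 get -first
+#	$c2 get -first
+#	# cmp returns 0 when both cursors are at the same position.
+#	error_check_good cursors_match [$c1 cmp $c2] 0
+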
+proc test125 { method args } {
+ global errorInfo
+ source ./include.tcl
+ set tnum 125
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tnum.db
+ set testfile2 $testdir/test$tnum-2.db
+ set env NULL
+ } else {
+ set testfile test$tnum.db
+ set testfile2 test$tnum-2.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ set t ""
+ set txn ""
+
+ # Run the test with and without duplicates, and with and without
+ # large data items.
+ foreach dupflag { "" "-dup" "-dup -dupsort" } {
+ if { [is_compressed $args] && $dupflag == "-dup" } {
+ puts "Skipping unsorted dups for btree with compression"
+ continue
+ }
+ foreach bigdata { 0 1 } {
+ set msg ""
+ if { $bigdata == 1 } {
+ set msg "with big data"
+ }
+ puts "Test$tnum ($method $dupflag $msg):\
+ Cursor comparison API."
+ if { [llength $dupflag] > 0 } {
+ if { [is_record_based $method] ||\
+ [is_rbtree $method] } {
+ puts "Skipping test for method $method\
+ with duplicates."
+ continue
+ }
+ set dups 1
+ } else {
+ set dups 0
+ }
+
+ # Testdir will get reset from the env's home dir back
+ # to the default if this calls something that sources
+ # include.tcl, since testdir is a global. Set it correctly
+ # here each time through the loop.
+ #
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest$tnum.a: Test failure cases."
+ # Open two databases.
+ set db [eval {berkdb_open_noerr} -create -mode 0644 \
+ $omethod $args $dupflag {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set db2 [eval {berkdb_open_noerr} -create -mode 0644 \
+ $omethod $args $dupflag {$testfile2}]
+ error_check_good db2_open [is_valid_db $db2] TRUE
+
+ # Populate the databases.
+ set nentries 1000
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ populate $db $method $t $nentries $dups $bigdata
+ populate $db2 $method $t $nentries $dups $bigdata
+
+ # Test error conditions.
+ puts "\t\tTest$tnum.a1: Uninitialized cursors."
+ set c1 [eval {$db cursor} $txn]
+ set c2 [eval {$db cursor} $txn]
+ set ret [catch {$c1 cmp $c2} res]
+ error_check_good uninitialized_cursors $ret 1
+
+ puts "\t\tTest$tnum.a2:\
+ One initialized, one uninitialized cursor."
+ $c1 get -first
+ set ret [catch {$c1 cmp $c2} res]
+ error_check_good one_uninitialized_cursor $ret 1
+
+ puts "\t\tTest$tnum.a3: Cursors in different databases."
+ set c3 [eval {$db2 cursor} $txn]
+ set ret [$c3 get -first]
+ set ret [catch {$c1 cmp $c3} res]
+ error_check_good cursors_in_different_databases $ret 1
+
+ # Clean up second database - we won't be using it again.
+ $c3 close
+ $db2 close
+
+ # Test valid conditions.
+ #
+ # Initialize second cursor to -first. Cursor cmp should
+ # match; c1 was already there.
+ puts "\tTest$tnum.b: Cursors initialized to -first."
+ set ret [$c2 get -first]
+ error_check_good c1_and_c2_on_first [$c1 cmp $c2] 0
+
+ # Walk to the end. We should alternate between
+ # matching and not matching.
+ puts "\tTest$tnum.c: Walk cursors to the last item."
+ for { set i 1 } { $i < $nentries } { incr i } {
+
+ # First move c1; cursors won't match.
+ set ret [$c1 get -next]
+ error_check_bad cmp_does_not_match [$c1 cmp $c2] 0
+
+ # Now move c2; cursors will match again.
+ set ret [$c2 get -next]
+ error_check_good cmp_matches [$c1 cmp $c2] 0
+ }
+
+ # Now do it in reverse, starting at -last and backing up.
+ puts "\tTest$tnum.d: Cursors initialized to -last."
+ set ret [$c1 get -last]
+ set ret [$c2 get -last]
+ error_check_good c1_and_c2_on_last [$c1 cmp $c2] 0
+
+ puts "\tTest$tnum.e: Walk cursors back to the first item."
+ for { set i 1 } { $i < $nentries } { incr i } {
+
+ # First move c1; cursors won't match.
+ set ret [$c1 get -prev]
+ error_check_bad cmp_does_not_match [$c1 cmp $c2] 0
+
+ # Now move c2; cursors will match again.
+ set ret [$c2 get -prev]
+ error_check_good cmp_matches [$c1 cmp $c2] 0
+ }
+
+ # A cursor delete leaves the cursor in the same place, so a
+ # comparison should still work.
+ puts "\tTest$tnum.f:\
+ Position comparison works with cursor deletes."
+ set ret [$c1 get -first]
+ set ret [$c2 get -first]
+
+ # Do the cursor walk again, deleting as we go.
+ puts "\tTest$tnum.g: Cursor walk with deletes."
+ for { set i 1 } { $i < $nentries } { incr i } {
+
+ # First move c1; cursors won't match.
+ set ret [$c1 get -next]
+ error_check_bad cmp_does_not_match [$c1 cmp $c2] 0
+
+ # Now move c2; cursors will match again.
+ set ret [$c2 get -next]
+ error_check_good cmp_matches [$c1 cmp $c2] 0
+
+ # Now delete through c2; cursors should still match.
+ set ret [$c2 del]
+ error_check_good cmp_still_matches [$c1 cmp $c2] 0
+ }
+
+ # Close cursors and database; commit txn.
+ error_check_good c1_close [$c1 close] 0
+ error_check_good c2_close [$c2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ }
+ }
+}
diff --git a/db-4.8.30/test/testparams.tcl b/db-4.8.30/test/testparams.tcl
new file mode 100644
index 0000000..fec4dfe
--- /dev/null
+++ b/db-4.8.30/test/testparams.tcl
@@ -0,0 +1,511 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+
+source ./include.tcl
+global is_freebsd_test
+global tcl_platform
+global rpc_tests
+global one_test
+global serial_tests
+set serial_tests {rep002 rep005 rep016 rep020 rep022 rep026 rep031 rep063 \
+ rep078 rep079}
+
+set subs {bigfile dead env fop lock log memp multi_repmgr mutex plat recd rep \
+ repmgr rpc rsrc sdb sdbtest sec si test txn}
+
+set test_names(bigfile) [list bigfile001 bigfile002]
+set test_names(compact) [list test111 test112 test113 test114 test115 test117]
+set test_names(dead) [list dead001 dead002 dead003 dead004 dead005 dead006 \
+ dead007]
+set test_names(elect) [list rep002 rep005 rep016 rep020 rep022 rep026 \
+ rep063 rep067 rep069 rep076]
+set test_names(env) [list env001 env002 env003 env004 env005 env006 \
+ env007 env008 env009 env010 env011 env012 env013 env014 env015 env016 \
+ env017 env018]
+set test_names(fop) [list fop001 fop002 fop003 fop004 fop005 fop006 \
+ fop007 fop008]
+set test_names(init) [list rep029 rep030 rep031 rep033 rep037 rep038 rep039\
+ rep055 rep060 rep061 rep062 rep070 rep072 rep084 rep085 rep086 rep087]
+set test_names(inmemdb) [list fop007 fop008 sdb013 sdb014 \
+ sdb015 sdb016 sdb017 sdb018 sdb019 sdb020]
+set test_names(lock) [list lock001 lock002 lock003 lock004 lock005 lock006]
+set test_names(log) [list log001 log002 log003 log004 log005 log006 \
+ log007 log008 log009]
+set test_names(memp) [list memp001 memp002 memp003 memp004]
+set test_names(mutex) [list mut001 mut002 mut003]
+set test_names(plat) [list plat001]
+set test_names(recd) [list recd001 recd002 recd003 recd004 recd005 recd006 \
+ recd007 recd008 recd009 recd010 recd011 recd012 recd013 recd014 recd015 \
+ recd016 recd017 recd018 recd019 recd020 recd022 recd023 recd024]
+set test_names(rep) [list rep001 rep002 rep003 rep005 rep006 rep007 \
+ rep008 rep009 rep010 rep011 rep012 rep013 rep014 rep015 rep016 rep017 \
+ rep018 rep019 rep020 rep021 rep022 rep023 rep024 rep025 rep026 rep027 \
+ rep028 rep029 rep030 rep031 rep032 rep033 rep034 rep035 rep036 rep037 \
+ rep038 rep039 rep040 rep041 rep042 rep043 rep044 rep045 rep046 rep047 \
+ rep048 rep049 rep050 rep051 rep052 rep053 rep054 rep055 \
+ rep058 rep060 rep061 rep062 rep063 rep064 rep065 rep066 rep067 \
+ rep068 rep069 rep070 rep071 rep072 rep073 rep074 rep075 rep076 rep077 \
+ rep078 rep079 rep080 rep081 rep082 rep083 rep084 rep085 rep086 rep087 rep088]
+set test_names(rep_inmem) [list rep001 rep005 rep006 rep007 rep010 rep012 rep013\
+ rep014 rep016 rep019 rep020 rep021 rep022 rep023 rep024 rep025 \
+ rep026 rep028 rep029 rep030 rep031 rep032 rep033 rep034 rep035 \
+ rep037 rep038 rep039 rep040 rep041 rep044 rep045 rep046 rep047 \
+ rep048 rep049 rep050 rep051 rep052 rep053 rep054 rep055 rep060 \
+ rep061 rep062 rep063 rep064 rep066 rep067 rep069 rep070 rep071 \
+ rep072 rep073 rep074 rep075 rep076 rep077 rep080 ]
+set test_names(repmgr) [list repmgr001 repmgr002 repmgr003 repmgr004 \
+ repmgr005 repmgr006 repmgr007 repmgr008 repmgr009 repmgr010 repmgr011 \
+ repmgr012 repmgr013 repmgr014 repmgr015 repmgr016 repmgr017 repmgr018 \
+ repmgr019]
+set test_names(multi_repmgr) [list repmgr022 repmgr023 repmgr024 \
+ repmgr025 repmgr026 repmgr027 repmgr028 repmgr029 repmgr030 repmgr031 \
+ repmgr032]
+set test_names(rpc) [list rpc001 rpc002 rpc003 rpc004 rpc005 rpc006]
+set test_names(rsrc) [list rsrc001 rsrc002 rsrc003 rsrc004]
+set test_names(sdb) [list sdb001 sdb002 sdb003 sdb004 sdb005 sdb006 \
+ sdb007 sdb008 sdb009 sdb010 sdb011 sdb012 sdb013 sdb014 sdb015 sdb016 \
+ sdb017 sdb018 sdb019 sdb020 ]
+set test_names(sdbtest) [list sdbtest001 sdbtest002]
+set test_names(sec) [list sec001 sec002]
+set test_names(si) [list si001 si002 si003 si004 si005 si006 si007 si008]
+set test_names(test) [list test001 test002 test003 test004 test005 \
+ test006 test007 test008 test009 test010 test011 test012 test013 test014 \
+ test015 test016 test017 test018 test019 test020 test021 test022 test023 \
+ test024 test025 test026 test027 test028 test029 test030 test031 test032 \
+ test033 test034 test035 test036 test037 test038 test039 test040 test041 \
+ test042 test043 test044 test045 test046 test047 test048 test049 test050 \
+ test051 test052 test053 test054 test055 test056 test057 test058 test059 \
+ test060 test061 test062 test063 test064 test065 test066 test067 test068 \
+ test069 test070 test071 test072 test073 test074 test076 test077 \
+ test078 test079 test081 test082 test083 test084 test085 test086 \
+ test087 test088 test089 test090 test091 test092 test093 test094 test095 \
+ test096 test097 test098 test099 test100 test101 test102 test103 test107 \
+ test109 test110 test111 test112 test113 test114 test115 test116 test117 \
+ test119 test120 test121 test122 test123 test125]
+
+set test_names(txn) [list txn001 txn002 txn003 txn004 txn005 txn006 \
+ txn007 txn008 txn009 txn010 txn011 txn012 txn013 txn014]
+
+set rpc_tests(berkeley_db_svc) [concat $test_names(test) $test_names(sdb)]
+set rpc_tests(berkeley_db_cxxsvc) $test_names(test)
+set rpc_tests(berkeley_db_javasvc) $test_names(test)
+
+# FreeBSD, in version 5.4, has problems dealing with large messages
+# over RPC. Exclude those tests. We believe these problems are
+# resolved for later versions. SR [#13542]
+set freebsd_skip_tests_for_rpc [list test003 test008 test009 test012 test017 \
+ test028 test081 test095 test102 test103 test119 sdb004 sdb011]
+set freebsd_5_4 0
+if { $is_freebsd_test } {
+ set version $tcl_platform(osVersion)
+ if { [is_substr $version "5.4"] } {
+ set freebsd_5_4 1
+ }
+}
+if { $freebsd_5_4 } {
+ foreach svc {berkeley_db_svc berkeley_db_cxxsvc berkeley_db_javasvc} {
+ foreach test $freebsd_skip_tests_for_rpc {
+ set idx [lsearch -exact $rpc_tests($svc) $test]
+ if { $idx >= 0 } {
+ set rpc_tests($svc)\
+ [lreplace $rpc_tests($svc) $idx $idx]
+ }
+ }
+ }
+}
+
+# JE tests are a subset of regular RPC tests -- exclude these ones.
+# The first group of tests excluded below should be fixable by modifying
+# the tests to avoid unsorted duplicates and the like; the second group
+# will probably never work unless certain features are added to JE
+# (record numbers, bulk get, etc.).
+set je_exclude {(?x) # Turn on extended syntax
+ test(010|026|027|028|030|031|032|033|034| # These should be fixable by
+ 035|039|041|046|047|054|056|057|062| # modifying tests to avoid
+ 066|073|081|085)| # unsorted dups, etc.
+
+ test(011|017|018|022|023|024|029|040|049| # Not expected to work with
+ 062|083|095) # JE until / unless features
+ # are added to JE (record
+ # numbers, bulk gets, etc.)
+}
+set rpc_tests(berkeley_dbje_svc) [lsearch -all -inline -not -regexp \
+ $rpc_tests(berkeley_db_svc) $je_exclude]
+
+# Source all the tests, whether we're running one or many.
+foreach sub $subs {
+ foreach test $test_names($sub) {
+ source $test_path/$test.tcl
+ }
+}
+
+# Reset test_names if we're running only one test.
+if { $one_test != "ALL" } {
+ foreach sub $subs {
+ set test_names($sub) ""
+ }
+ set type [string trim $one_test 0123456789]
+ set test_names($type) [list $one_test]
+}
+
+source $test_path/archive.tcl
+source $test_path/backup.tcl
+source $test_path/byteorder.tcl
+source $test_path/dbm.tcl
+source $test_path/foputils.tcl
+source $test_path/hsearch.tcl
+source $test_path/join.tcl
+source $test_path/logtrack.tcl
+source $test_path/ndbm.tcl
+source $test_path/parallel.tcl
+source $test_path/reputils.tcl
+source $test_path/reputilsnoenv.tcl
+source $test_path/sdbutils.tcl
+source $test_path/shelltest.tcl
+source $test_path/sijointest.tcl
+source $test_path/siutils.tcl
+source $test_path/testutils.tcl
+source $test_path/upgrade.tcl
+
+set parms(recd001) 0
+set parms(recd002) 0
+set parms(recd003) 0
+set parms(recd004) 0
+set parms(recd005) ""
+set parms(recd006) 0
+set parms(recd007) ""
+set parms(recd008) {4 4}
+set parms(recd009) 0
+set parms(recd010) 0
+set parms(recd011) {200 15 1}
+set parms(recd012) {0 49 25 100 5}
+set parms(recd013) 100
+set parms(recd014) ""
+set parms(recd015) ""
+set parms(recd016) ""
+set parms(recd017) 0
+set parms(recd018) 10
+set parms(recd019) 50
+set parms(recd020) ""
+set parms(recd022) ""
+set parms(recd023) ""
+set parms(recd024) ""
+set parms(rep001) {1000 "001"}
+set parms(rep002) {10 3 "002"}
+set parms(rep003) "003"
+set parms(rep005) ""
+set parms(rep006) {1000 "006"}
+set parms(rep007) {10 "007"}
+set parms(rep008) {10 "008"}
+set parms(rep009) {10 "009"}
+set parms(rep010) {100 "010"}
+set parms(rep011) "011"
+set parms(rep012) {10 "012"}
+set parms(rep013) {10 "013"}
+set parms(rep014) {10 "014"}
+set parms(rep015) {100 "015" 3}
+set parms(rep016) ""
+set parms(rep017) {10 "017"}
+set parms(rep018) {10 "018"}
+set parms(rep019) {3 "019"}
+set parms(rep020) ""
+set parms(rep021) {3 "021"}
+set parms(rep022) ""
+set parms(rep023) {10 "023"}
+set parms(rep024) {1000 "024"}
+set parms(rep025) {200 "025"}
+set parms(rep026) ""
+set parms(rep027) {1000 "027"}
+set parms(rep028) {100 "028"}
+set parms(rep029) {200 "029"}
+set parms(rep030) {500 "030"}
+set parms(rep031) {200 "031"}
+set parms(rep032) {200 "032"}
+set parms(rep033) {200 "033"}
+set parms(rep034) {2 "034"}
+set parms(rep035) {100 "035"}
+set parms(rep036) {200 "036"}
+set parms(rep037) {1500 "037"}
+set parms(rep038) {200 "038"}
+set parms(rep039) {200 "039"}
+set parms(rep040) {200 "040"}
+set parms(rep041) {500 "041"}
+set parms(rep042) {10 "042"}
+set parms(rep043) {25 "043"}
+set parms(rep044) {"044"}
+set parms(rep045) {"045"}
+set parms(rep046) {200 "046"}
+set parms(rep047) {200 "047"}
+set parms(rep048) {3000 "048"}
+set parms(rep049) {10 "049"}
+set parms(rep050) {10 "050"}
+set parms(rep051) {1000 "051"}
+set parms(rep052) {200 "052"}
+set parms(rep053) {200 "053"}
+set parms(rep054) {200 "054"}
+set parms(rep055) {200 "055"}
+set parms(rep058) "058"
+set parms(rep060) {200 "060"}
+set parms(rep061) {500 "061"}
+set parms(rep062) "062"
+set parms(rep063) ""
+set parms(rep064) {10 "064"}
+set parms(rep065) {3}
+set parms(rep066) {10 "066"}
+set parms(rep067) ""
+set parms(rep068) {"068"}
+set parms(rep069) {200 "069"}
+set parms(rep070) {200 "070"}
+set parms(rep071) { 10 "071"}
+set parms(rep072) {200 "072"}
+set parms(rep073) {200 "073"}
+set parms(rep074) {"074"}
+set parms(rep075) {"075"}
+set parms(rep076) ""
+set parms(rep077) {"077"}
+set parms(rep078) {"078"}
+set parms(rep079) {"079"}
+set parms(rep080) {200 "080"}
+set parms(rep081) {200 "081"}
+set parms(rep082) {200 "082"}
+set parms(rep083) {200 "083"}
+set parms(rep084) {200 "084"}
+set parms(rep085) {20 "085"}
+set parms(rep086) {"086"}
+set parms(rep087) {200 "087"}
+set parms(rep088) {20 "088"}
+set parms(repmgr001) {100 "001"}
+set parms(repmgr002) {100 "002"}
+set parms(repmgr003) {100 "003"}
+set parms(repmgr004) {100 "004"}
+set parms(repmgr005) {100 "005"}
+set parms(repmgr006) {1000 "006"}
+set parms(repmgr007) {100 "007"}
+set parms(repmgr008) {100 "008"}
+set parms(repmgr009) {10 "009"}
+set parms(repmgr010) {100 "010"}
+set parms(repmgr011) {100 "011"}
+set parms(repmgr012) {100 "012"}
+set parms(repmgr013) {100 "013"}
+set parms(repmgr014) {100 "014"}
+set parms(repmgr015) {100 "015"}
+set parms(repmgr016) {100 "016"}
+set parms(repmgr017) {1000 "017"}
+set parms(repmgr018) {100 "018"}
+set parms(repmgr019) {100 "019"}
+set parms(repmgr022) ""
+set parms(repmgr023) ""
+set parms(repmgr024) ""
+set parms(repmgr025) ""
+set parms(repmgr026) ""
+set parms(repmgr027) ""
+set parms(repmgr028) ""
+set parms(repmgr029) ""
+set parms(repmgr030) ""
+set parms(repmgr031) ""
+set parms(repmgr032) ""
+set parms(subdb001) ""
+set parms(subdb002) 10000
+set parms(subdb003) 1000
+set parms(subdb004) ""
+set parms(subdb005) 100
+set parms(subdb006) 100
+set parms(subdb007) ""
+set parms(subdb008) ""
+set parms(subdb009) ""
+set parms(subdb010) ""
+set parms(subdb011) {13 10}
+set parms(subdb012) ""
+set parms(sdb001) ""
+set parms(sdb002) 10000
+set parms(sdb003) 1000
+set parms(sdb004) ""
+set parms(sdb005) 100
+set parms(sdb006) 100
+set parms(sdb007) ""
+set parms(sdb008) ""
+set parms(sdb009) ""
+set parms(sdb010) ""
+set parms(sdb011) {13 10}
+set parms(sdb012) ""
+set parms(sdb013) 10
+set parms(sdb014) ""
+set parms(sdb015) 1000
+set parms(sdb016) 100
+set parms(sdb017) ""
+set parms(sdb018) 100
+set parms(sdb019) 100
+set parms(sdb020) 10
+set parms(si001) {200 "001"}
+set parms(si002) {200 "002"}
+set parms(si003) {200 "003"}
+set parms(si004) {200 "004"}
+set parms(si005) {200 "005"}
+set parms(si006) {200 "006"}
+set parms(si007) {10 "007"}
+set parms(si008) {10 "008"}
+set parms(test001) {10000 0 0 "001"}
+set parms(test002) 10000
+set parms(test003) ""
+set parms(test004) {10000 "004" 0}
+set parms(test005) 10000
+set parms(test006) {10000 0 "006" 5}
+set parms(test007) {10000 "007" 5}
+set parms(test008) {"008" 0}
+set parms(test009) ""
+set parms(test010) {10000 5 "010"}
+set parms(test011) {10000 5 "011"}
+set parms(test012) ""
+set parms(test013) 10000
+set parms(test014) 10000
+set parms(test015) {7500 0}
+set parms(test016) 10000
+set parms(test017) {0 19 "017"}
+set parms(test018) 10000
+set parms(test019) 10000
+set parms(test020) 10000
+set parms(test021) 10000
+set parms(test022) ""
+set parms(test023) ""
+set parms(test024) 10000
+set parms(test025) {10000 0 "025"}
+set parms(test026) {2000 5 "026"}
+set parms(test027) {100}
+set parms(test028) ""
+set parms(test029) 10000
+set parms(test030) 10000
+set parms(test031) {10000 5 "031"}
+set parms(test032) {10000 5 "032" 0}
+set parms(test033) {10000 5 "033"}
+set parms(test034) 10000
+set parms(test035) 10000
+set parms(test036) 10000
+set parms(test037) 100
+set parms(test038) {10000 5 "038"}
+set parms(test039) {10000 5 "039"}
+set parms(test040) 10000
+set parms(test041) 10000
+set parms(test042) 1000
+set parms(test043) 10000
+set parms(test044) {5 10 0}
+set parms(test045) 1000
+set parms(test046) ""
+set parms(test047) ""
+set parms(test048) ""
+set parms(test049) ""
+set parms(test050) ""
+set parms(test051) ""
+set parms(test052) ""
+set parms(test053) ""
+set parms(test054) ""
+set parms(test055) ""
+set parms(test056) ""
+set parms(test057) ""
+set parms(test058) ""
+set parms(test059) ""
+set parms(test060) ""
+set parms(test061) ""
+set parms(test062) {200 200 "062"}
+set parms(test063) ""
+set parms(test064) ""
+set parms(test065) ""
+set parms(test066) ""
+set parms(test067) {1000 "067"}
+set parms(test068) ""
+set parms(test069) {50 "069"}
+set parms(test070) {4 2 1000 CONSUME 0 -txn "070"}
+set parms(test071) {1 1 10000 CONSUME 0 -txn "071"}
+set parms(test072) {512 20 "072"}
+set parms(test073) {512 50 "073"}
+set parms(test074) {-nextnodup 100 "074"}
+set parms(test076) {1000 "076"}
+set parms(test077) {1000 "077"}
+set parms(test078) {100 512 "078"}
+set parms(test079) {10000 512 "079" 20}
+set parms(test081) {13 "081"}
+set parms(test082) {-prevnodup 100 "082"}
+set parms(test083) {512 5000 2}
+set parms(test084) {10000 "084" 65536}
+set parms(test085) {512 3 10 "085"}
+set parms(test086) ""
+set parms(test087) {512 50 "087"}
+set parms(test088) ""
+set parms(test089) 1000
+set parms(test090) {10000 "090"}
+set parms(test091) {4 2 1000 0 "091"}
+set parms(test092) {1000}
+set parms(test093) {10000 "093"}
+set parms(test094) {10000 10 "094"}
+set parms(test095) {"095"}
+set parms(test096) {512 1000 19}
+set parms(test097) {500 400}
+set parms(test098) ""
+set parms(test099) 10000
+set parms(test100) {10000 "100"}
+set parms(test101) {1000 -txn "101"}
+set parms(test102) {1000 "102"}
+set parms(test103) {100 4294967250 "103"}
+set parms(test107) ""
+set parms(test109) {"109"}
+set parms(test110) {10000 3}
+set parms(test111) {10000 "111"}
+set parms(test112) {80000 "112"}
+set parms(test113) {10000 5 "113"}
+set parms(test114) {10000 "114"}
+set parms(test115) {10000 "115"}
+set parms(test116) {"116"}
+set parms(test117) {10000 "117"}
+set parms(test119) {"119"}
+set parms(test120) {"120"}
+set parms(test121) {"121"}
+set parms(test122) {"122"}
+set parms(test123) ""
+set parms(test125) ""
+
+# RPC server executables. Each of these is tested (if it exists)
+# when running the RPC tests.
+set svc_list { berkeley_db_svc berkeley_db_cxxsvc \
+ berkeley_db_javasvc berkeley_dbje_svc }
+set rpc_svc berkeley_db_svc
+
+# Shell script tests. Each list entry is a {directory filename} pair,
+# invoked with "/bin/sh filename".
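+# For example (illustrative), the entry { scr001 chk.code } names the
+# script chk.code in the directory scr001, which is then invoked with
+# /bin/sh.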
+set shelltest_list {
+ { scr001 chk.code }
+ { scr002 chk.def }
+ { scr003 chk.define }
+ { scr004 chk.javafiles }
+ { scr005 chk.nl }
+ { scr006 chk.offt }
+ { scr007 chk.proto }
+ { scr008 chk.pubdef }
+ { scr009 chk.srcfiles }
+ { scr010 chk.str }
+ { scr011 chk.tags }
+ { scr012 chk.vx_code }
+ { scr013 chk.stats }
+ { scr014 chk.err }
+ { scr015 chk.cxxtests }
+ { scr016 chk.bdb }
+ { scr017 chk.db185 }
+ { scr018 chk.comma }
+ { scr019 chk.include }
+ { scr020 chk.inc }
+ { scr021 chk.flags }
+ { scr022 chk.rr }
+ { scr023 chk.q }
+ { scr024 chk.bdb }
+ { scr025 chk.cxxmulti }
+ { scr026 chk.method }
+ { scr027 chk.javas }
+ { scr028 chk.rtc }
+ { scr029 chk.get }
+ { scr030 chk.build }
+ { scr031 chk.copy }
+ { scr032 chk.rpc }
+ { scr033 chk.codegen }
+ { scr034 chk.mtx }
+ { scr035 chk.osdir }
+}
diff --git a/db-4.8.30/test/testutils.tcl b/db-4.8.30/test/testutils.tcl
new file mode 100644
index 0000000..98e16e2
--- /dev/null
+++ b/db-4.8.30/test/testutils.tcl
@@ -0,0 +1,3908 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Test system utilities
+#
+# Timestamp -- print time along with elapsed time since last invocation
+# of timestamp.
+proc timestamp {{opt ""}} {
+ global __timestamp_start
+
+ set now [clock seconds]
+
+ # -c accurate to the click, instead of the second.
+ # -r seconds since the Epoch
+ # -t current time in the format expected by db_recover -t.
+ # -w wallclock time
+ # else wallclock plus elapsed time.
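+	# For example (illustrative): "timestamp -t" produces a string
+	# such as "0906151230.05" (yymmddHHMM.SS), suitable for passing to
+	# db_recover -t; the default form prints the current time plus the
+	# elapsed time since the previous call.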
+ if {[string compare $opt "-r"] == 0} {
+ return $now
+ } elseif {[string compare $opt "-t"] == 0} {
+ return [clock format $now -format "%y%m%d%H%M.%S"]
+ } elseif {[string compare $opt "-w"] == 0} {
+ return [clock format $now -format "%c"]
+ } else {
+ if {[string compare $opt "-c"] == 0} {
+ set printclicks 1
+ } else {
+ set printclicks 0
+ }
+
+ if {[catch {set start $__timestamp_start}] != 0} {
+ set __timestamp_start $now
+ }
+ set start $__timestamp_start
+
+ set elapsed [expr $now - $start]
+ set the_time [clock format $now -format ""]
+ set __timestamp_start $now
+
+ if { $printclicks == 1 } {
+ set pc_print [format ".%08u" [__fix_num [clock clicks]]]
+ } else {
+ set pc_print ""
+ }
+
+ format "%02d:%02d:%02d$pc_print (%02d:%02d:%02d)" \
+ [__fix_num [clock format $now -format "%H"]] \
+ [__fix_num [clock format $now -format "%M"]] \
+ [__fix_num [clock format $now -format "%S"]] \
+ [expr $elapsed / 3600] \
+ [expr ($elapsed % 3600) / 60] \
+ [expr ($elapsed % 3600) % 60]
+ }
+}
+
+proc __fix_num { num } {
+ set num [string trimleft $num "0"]
+ if {[string length $num] == 0} {
+ set num "0"
+ }
+ return $num
+}
+
+# Add a {key,data} pair to the specified database where
+# key=filename and data=file contents.
+proc put_file { db txn flags file } {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ close $fid
+
+ set ret [eval {$db put} $txn $flags {$file $data}]
+ error_check_good put_file $ret 0
+}
+
+# Get a {key,data} pair from the specified database where
+# key=filename and data=file contents and then write the
+# data to the specified file.
+proc get_file { db txn flags file outfile } {
+ source ./include.tcl
+
+ set fid [open $outfile w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $txn $flags {$file}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid $data
+ }
+ close $fid
+}
+
+# Add a {key,data} pair to the specified database where
+# key=file contents and data=file name.
+proc put_file_as_key { db txn flags file } {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+
+ # Use not the file contents, but the file name concatenated
+ # before the file contents, as a key, to ensure uniqueness.
+ set data $file$filecont
+
+ set ret [eval {$db put} $txn $flags {$data $file}]
+ error_check_good put_file $ret 0
+}
+
+# Get a {key,data} pair from the specified database where
+# key=file contents and data=file name
+proc get_file_as_key { db txn flags file} {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+
+ set data $file$filecont
+
+ return [eval {$db get} $txn $flags {$data}]
+}
+
+# Open the file and call the specified dump function to dump keys to outfile.
+proc open_and_dump_file {
+ dbname env outfile checkfunc dump_func beg cont args} {
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ set encarg ""
+ if { $encrypt > 0 && $env == "NULL" } {
+ set encarg "-encryptany $passwd"
+ }
+ set envarg ""
+ set txn ""
+ set txnenv 0
+ if { $env != "NULL" } {
+ append envarg " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append envarg " -auto_commit "
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ }
+ set db [eval {berkdb open} $envarg -rdonly -unknown $encarg $args $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ $dump_func $db $txn $outfile $checkfunc $beg $cont
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# Open the named subdatabase and call the specified dump function to dump
+# keys to outfile.
+proc open_and_dump_subfile {
+ dbname env outfile checkfunc dump_func beg cont subdb} {
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ set encarg ""
+ if { $encrypt > 0 && $env == "NULL" } {
+ set encarg "-encryptany $passwd"
+ }
+ set envarg ""
+ set txn ""
+ set txnenv 0
+ if { $env != "NULL" } {
+ append envarg "-env $env"
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append envarg " -auto_commit "
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ }
+ set db [eval {berkdb open -rdonly -unknown} \
+ $envarg $encarg {$dbname $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ $dump_func $db $txn $outfile $checkfunc $beg $cont
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# Sequentially read a file and call checkfunc on each key/data pair.
+# Dump the keys out to the file specified by outfile.
+proc dump_file { db txn outfile {checkfunc NONE} } {
+ source ./include.tcl
+
+ dump_file_direction $db $txn $outfile $checkfunc "-first" "-next"
+}
+
+proc dump_file_direction { db txn outfile checkfunc start continue } {
+ source ./include.tcl
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+ dump_file_walk $c $outfile $checkfunc $start $continue
+ error_check_good curs_close [$c close] 0
+}
+
+proc dump_file_walk { c outfile checkfunc start continue {flag ""} } {
+ set outf [open $outfile w]
+ for {set d [eval {$c get} $flag $start] } \
+ { [llength $d] != 0 } \
+ {set d [eval {$c get} $flag $continue] } {
+ set kd [lindex $d 0]
+ set k [lindex $kd 0]
+ set d2 [lindex $kd 1]
+ if { $checkfunc != "NONE" } {
+ $checkfunc $k $d2
+ }
+ puts $outf $k
+ # XXX: Geoff Mainland
+ # puts $outf "$k $d2"
+ }
+ close $outf
+}
+
+proc dump_binkey_file { db txn outfile checkfunc } {
+ source ./include.tcl
+
+ dump_binkey_file_direction $db $txn $outfile $checkfunc \
+ "-first" "-next"
+}
+proc dump_bin_file { db txn outfile checkfunc } {
+ source ./include.tcl
+
+ dump_bin_file_direction $db $txn $outfile $checkfunc "-first" "-next"
+}
+
+# Note: the following procedure assumes that the binary-file-as-keys were
+# inserted into the database by put_file_as_key, and consist of the file
+# name followed by the file contents as key, to ensure uniqueness.
+proc dump_binkey_file_direction { db txn outfile checkfunc begin cont } {
+ source ./include.tcl
+
+ set d1 $testdir/d1
+
+ set outf [open $outfile w]
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+
+ set inf $d1
+ for {set d [$c get $begin] } { [llength $d] != 0 } \
+ {set d [$c get $cont] } {
+ set kd [lindex $d 0]
+ set keyfile [lindex $kd 0]
+ set data [lindex $kd 1]
+
+ set ofid [open $d1 w]
+ fconfigure $ofid -translation binary
+
+ # Chop off the first few bytes--that's the file name,
+ # added for uniqueness in put_file_as_key, which we don't
+ # want in the regenerated file.
+ set namelen [string length $data]
+ set keyfile [string range $keyfile $namelen end]
+ puts -nonewline $ofid $keyfile
+ close $ofid
+
+ $checkfunc $data $d1
+ puts $outf $data
+ flush $outf
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ fileremove $d1
+}
+
+proc dump_bin_file_direction { db txn outfile checkfunc begin cont } {
+ source ./include.tcl
+
+ set d1 $testdir/d1
+
+ set outf [open $outfile w]
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+
+ for {set d [$c get $begin] } \
+ { [llength $d] != 0 } {set d [$c get $cont] } {
+ set k [lindex [lindex $d 0] 0]
+ set data [lindex [lindex $d 0] 1]
+ set ofid [open $d1 w]
+ fconfigure $ofid -translation binary
+ puts -nonewline $ofid $data
+ close $ofid
+
+ $checkfunc $k $d1
+ puts $outf $k
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ fileremove -f $d1
+}
+
+proc make_data_str { key } {
+ set datastr ""
+ for {set i 0} {$i < 10} {incr i} {
+ append datastr $key
+ }
+ return $datastr
+}
+
+proc error_check_bad { func result bad {txn 0}} {
+ if { [binary_compare $result $bad] == 0 } {
+ if { $txn != 0 } {
+ $txn abort
+ }
+ flush stdout
+ flush stderr
+ error "FAIL:[timestamp] $func returned error value $bad"
+ }
+}
+
+proc error_check_good { func result desired {txn 0} } {
+ if { [binary_compare $desired $result] != 0 } {
+ if { $txn != 0 } {
+ $txn abort
+ }
+ flush stdout
+ flush stderr
+ error "FAIL:[timestamp]\
+ $func: expected $desired, got $result"
+ }
+}
+
+proc error_check_match { note result desired } {
+ if { ![string match $desired $result] } {
+ error "FAIL:[timestamp]\
+ $note: expected $desired, got $result"
+ }
+}
+
+# Locks have the prefix of their manager.
+proc is_substr { str sub } {
+ if { [string first $sub $str] == -1 } {
+ return 0
+ } else {
+ return 1
+ }
+}
+
+proc is_serial { str } {
+ global serial_tests
+
+ foreach test $serial_tests {
+ if { [is_substr $str $test] == 1 } {
+ return 1
+ }
+ }
+ return 0
+}
+
+proc release_list { l } {
+
+ # Now release all the locks
+ foreach el $l {
+ catch { $el put } ret
+ error_check_good lock_put $ret 0
+ }
+}
+
+proc debug { {stop 0} } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+
+ set __debug_on 1
+ set __debug_print 1
+ set __debug_test $stop
+}
+
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc dup_check { db txn tmpfile dlist {extra 0}} {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $key
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ #
+ # Some tests add an extra dup (like overflow entries)
+ # Check id if it exists.
+ if { $extra != 0} {
+ set okey $key
+ set rec [$c get "-next"]
+ if { [string length $rec] != 0 } {
+ set key [lindex [lindex $rec 0] 0]
+ #
+ # If this key has no extras, go back for
+ # next iteration.
+ if { [string compare $key $lastkey] != 0 } {
+ set key $okey
+ set rec [$c get "-prev"]
+ } else {
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ error_check_bad dupget.data1 $d $key
+ error_check_good dupget.id1 $id $extra
+ }
+ }
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc dup_file_check { db txn tmpfile dlist } {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ if { [string compare $key $lastkey] != 0 } {
+ #
+ # If we changed files read in new contents.
+ #
+ set fid [open $key r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+ }
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $filecont
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
+# Parse duplicate data entries of the form N:data. Data_of returns
+# the data part; id_of returns the numerical part
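+# For example, given the stored entry "3:abc", id_of returns "3" and
+# data_of returns "abc".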
+proc data_of {str} {
+ set ndx [string first ":" $str]
+ if { $ndx == -1 } {
+ return ""
+ }
+ return [ string range $str [expr $ndx + 1] end]
+}
+
+proc id_of {str} {
+ set ndx [string first ":" $str]
+ if { $ndx == -1 } {
+ return ""
+ }
+
+ return [ string range $str 0 [expr $ndx - 1]]
+}
+
+proc nop { {args} } {
+ return
+}
+
+# Partial put test procedure.
+# Munges a data val through three different partial puts. Stores
+# the final munged string in the dvals array so that you can check
+# it later (dvals should be global). We take the characters that
+# are being replaced, make them capitals and then replicate them
+# some number of times (n_add). We do this at the beginning of the
+# data, at the middle and at the end. The parameters are:
+# db, txn, key -- as per usual. Data is the original data element
+# from which we are starting. n_replace is the number of characters
+# that we will replace. n_add is the number of times we will add
+# the replaced string back in.
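+#
+# A small worked example (illustrative only): starting from data "abcdef"
+# with n_replace 2 and n_add 2, the beginning change takes "ab", uppercases
+# and replicates it to "ABAB", and puts it with -partial {0 2}, yielding
+# "ABABcdef"; the end and middle changes are then applied to the successive
+# results in the same way.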
+proc partial_put { method db txn gflags key data n_replace n_add } {
+ global dvals
+ source ./include.tcl
+
+ # Here is the loop where we put and get each key/data pair
+ # We will do the initial put and then three Partial Puts
+ # for the beginning, middle and end of the string.
+
+ eval {$db put} $txn {$key [chop_data $method $data]}
+
+ # Beginning change
+ set s [string range $data 0 [ expr $n_replace - 1 ] ]
+ set repl [ replicate [string toupper $s] $n_add ]
+
+ # This is gross, but necessary: if this is a fixed-length
+ # method, and the chopped length of $repl is zero,
+ # it's because the original string was zero-length and our data item
+ # is all nulls. Set repl to something non-NULL.
+ if { [is_fixed_length $method] && \
+ [string length [chop_data $method $repl]] == 0 } {
+ set repl [replicate "." $n_add]
+ }
+
+ set newstr [chop_data $method $repl[string range $data $n_replace end]]
+ set ret [eval {$db put} $txn {-partial [list 0 $n_replace] \
+ $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ # End Change
+ set len [string length $newstr]
+ set spl [expr $len - $n_replace]
+ # Handle case where $n_replace > $len
+ if { $spl < 0 } {
+ set spl 0
+ }
+
+ set s [string range $newstr [ expr $len - $n_replace ] end ]
+ # Handle zero-length keys
+ if { [string length $s] == 0 } { set s "A" }
+
+ set repl [ replicate [string toupper $s] $n_add ]
+ set newstr [chop_data $method \
+ [string range $newstr 0 [expr $spl - 1 ] ]$repl]
+
+ set ret [eval {$db put} $txn \
+ {-partial [list $spl $n_replace] $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ # Middle Change
+ set len [string length $newstr]
+ set mid [expr $len / 2 ]
+ set beg [expr $mid - [expr $n_replace / 2] ]
+ set end [expr $beg + $n_replace - 1]
+ set s [string range $newstr $beg $end]
+ set repl [ replicate [string toupper $s] $n_add ]
+ set newstr [chop_data $method [string range $newstr 0 \
+ [expr $beg - 1 ] ]$repl[string range $newstr [expr $end + 1] end]]
+
+ set ret [eval {$db put} $txn {-partial [list $beg $n_replace] \
+ $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ set dvals($key) [pad_data $method $newstr]
+}
+
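+# Replicate a string by repeated doubling. Note that because the string
+# doubles on each pass, the result holds the number of copies of $str
+# rounded up to the next power of two (at least $times copies), not
+# exactly $times.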
+proc replicate { str times } {
+ set res $str
+ for { set i 1 } { $i < $times } { set i [expr $i * 2] } {
+ append res $res
+ }
+ return $res
+}
+
+proc repeat { str n } {
+ set ret ""
+ while { $n > 0 } {
+ set ret $str$ret
+ incr n -1
+ }
+ return $ret
+}
+
+proc isqrt { l } {
+ set s [expr sqrt($l)]
+ set ndx [expr [string first "." $s] - 1]
+ return [string range $s 0 $ndx]
+}
+
+# If we run watch_procs multiple times without an intervening
+# testdir cleanup, it's possible that old sentinel files will confuse
+# us. Make sure they're wiped out before we spawn any other processes.
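+# Sentinel files are named begin.<pid> and end.<pid> in $testdir;
+# watch_procs (below) considers a process finished once its end.<pid>
+# file appears.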
+proc sentinel_init { } {
+ source ./include.tcl
+
+ set filelist {}
+ set ret [catch {glob $testdir/begin.*} result]
+ if { $ret == 0 } {
+ set filelist $result
+ }
+
+ set ret [catch {glob $testdir/end.*} result]
+ if { $ret == 0 } {
+ set filelist [concat $filelist $result]
+ }
+
+ foreach f $filelist {
+ fileremove $f
+ }
+}
+
+proc watch_procs { pidlist {delay 5} {max 3600} {quiet 0} } {
+ source ./include.tcl
+ global killed_procs
+
+ set elapsed 0
+ set killed_procs {}
+
+ # Don't start watching the processes until a sentinel
+ # file has been created for each one.
+ foreach pid $pidlist {
+ while { [file exists $testdir/begin.$pid] == 0 } {
+ tclsleep $delay
+ incr elapsed $delay
+ # If pids haven't been created in one-fifth
+ # of the time allowed for the whole test,
+ # there's a problem. Report an error and fail.
+ if { $elapsed > [expr {$max / 5}] } {
+ puts "FAIL: begin.pid not created"
+ break
+ }
+ }
+ }
+
+ while { 1 } {
+
+ tclsleep $delay
+ incr elapsed $delay
+
+ # Find the list of processes with outstanding sentinel
+ # files (i.e. a begin.pid and no end.pid).
+ set beginlist {}
+ set endlist {}
+ set ret [catch {glob $testdir/begin.*} result]
+ if { $ret == 0 } {
+ set beginlist $result
+ }
+ set ret [catch {glob $testdir/end.*} result]
+ if { $ret == 0 } {
+ set endlist $result
+ }
+
+ set bpids {}
+ catch {unset epids}
+ foreach begfile $beginlist {
+ lappend bpids [string range $begfile \
+ [string length $testdir/begin.] end]
+ }
+ foreach endfile $endlist {
+ set epids([string range $endfile \
+ [string length $testdir/end.] end]) 1
+ }
+
+ # The set of processes that we still want to watch, $l,
+ # is the set of pids that have begun but not ended
+ # according to their sentinel files.
+ set l {}
+ foreach p $bpids {
+ if { [info exists epids($p)] == 0 } {
+ lappend l $p
+ }
+ }
+
+ set rlist {}
+ foreach i $l {
+ set r [ catch { exec $KILL -0 $i } res ]
+ if { $r == 0 } {
+ lappend rlist $i
+ }
+ }
+ if { [ llength $rlist] == 0 } {
+ break
+ } else {
+ puts "[timestamp] processes running: $rlist"
+ }
+
+ if { $elapsed > $max } {
+ # We have exceeded the limit; kill processes
+ # and report an error
+ foreach i $l {
+ tclkill $i
+ }
+ set killed_procs $l
+ }
+ }
+ if { $quiet == 0 } {
+ puts "All processes have exited."
+ }
+
+ #
+ # Once we are done, remove all old sentinel files.
+ #
+ set oldsent [glob -nocomplain $testdir/begin* $testdir/end*]
+	foreach f $oldsent {
+ fileremove -f $f
+ }
+
+}
+
+# These routines are all used from within the dbscript.tcl tester.
+proc db_init { dbp do_data } {
+ global a_keys
+ global l_keys
+ source ./include.tcl
+
+ set txn ""
+ set nk 0
+ set lastkey ""
+
+ set a_keys() BLANK
+ set l_keys ""
+
+ set c [$dbp cursor]
+ for {set d [$c get -first] } { [llength $d] != 0 } {
+ set d [$c get -next] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ incr nk
+ if { $do_data == 1 } {
+ if { [info exists a_keys($k)] } {
+				lappend a_keys($k) $d2
+ } else {
+ set a_keys($k) $d2
+ }
+ }
+
+ lappend l_keys $k
+ }
+ error_check_good curs_close [$c close] 0
+
+ return $nk
+}
+
+proc pick_op { min max n } {
+ if { $n == 0 } {
+ return add
+ }
+
+ set x [berkdb random_int 1 12]
+ if {$n < $min} {
+ if { $x <= 4 } {
+ return put
+ } elseif { $x <= 8} {
+ return get
+ } else {
+ return add
+ }
+ } elseif {$n > $max} {
+ if { $x <= 4 } {
+ return put
+ } elseif { $x <= 8 } {
+ return get
+ } else {
+ return del
+ }
+
+ } elseif { $x <= 3 } {
+ return del
+ } elseif { $x <= 6 } {
+ return get
+ } elseif { $x <= 9 } {
+ return put
+ } else {
+ return add
+ }
+}
+
+# random_data: Generate a string of random characters.
+# If recno is 0 - Use average to pick a length between 1 and 2 * avg.
+# If recno is non-0, generate a number between 1 and 2 ^ (avg * 2),
+# that will fit into a 32-bit integer.
+# If the unique flag is 1, then make sure that the string is unique
+# in the array "where".
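+# For example, with avg 5 and recno 0, the length of the generated string
+# is drawn uniformly from 1 through 9 (2 * avg - 1).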
+proc random_data { avg unique where {recno 0} } {
+ upvar #0 $where arr
+ global debug_on
+ set min 1
+ set max [expr $avg+$avg-1]
+ if { $recno } {
+ #
+ # Tcl seems to have problems with values > 30.
+ #
+ if { $max > 30 } {
+ set max 30
+ }
+ set maxnum [expr int(pow(2, $max))]
+ }
+ while {1} {
+ set len [berkdb random_int $min $max]
+ set s ""
+ if {$recno} {
+ set s [berkdb random_int 1 $maxnum]
+ } else {
+ for {set i 0} {$i < $len} {incr i} {
+ append s [int_to_char [berkdb random_int 0 25]]
+ }
+ }
+
+ if { $unique == 0 || [info exists arr($s)] == 0 } {
+ break
+ }
+ }
+
+ return $s
+}
+
+proc random_key { } {
+ global l_keys
+ global nkeys
+ set x [berkdb random_int 0 [expr $nkeys - 1]]
+ return [lindex $l_keys $x]
+}
+
+proc is_err { desired } {
+ set x [berkdb random_int 1 100]
+ if { $x <= $desired } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc pick_cursput { } {
+ set x [berkdb random_int 1 4]
+ switch $x {
+ 1 { return "-keylast" }
+ 2 { return "-keyfirst" }
+ 3 { return "-before" }
+ 4 { return "-after" }
+ }
+}
+
+proc random_cursor { curslist } {
+ global l_keys
+ global nkeys
+
+ set x [berkdb random_int 0 [expr [llength $curslist] - 1]]
+ set dbc [lindex $curslist $x]
+
+ # We want to randomly set the cursor. Pick a key.
+ set k [random_key]
+ set r [$dbc get "-set" $k]
+ error_check_good cursor_get:$k [is_substr Error $r] 0
+
+ # Now move forward or backward some hops to randomly
+ # position the cursor.
+ set dist [berkdb random_int -10 10]
+
+ set dir "-next"
+ set boundary "-first"
+ if { $dist < 0 } {
+ set dir "-prev"
+ set boundary "-last"
+ set dist [expr 0 - $dist]
+ }
+
+ for { set i 0 } { $i < $dist } { incr i } {
+		set r [ record $dbc get $dir ]
+		if { [llength $r] == 0 } {
+			set r [ record $dbc get $boundary ]
+		}
+		error_check_bad dbcget [llength $r] 0
+	}
+	return [linsert $r 0 $dbc]
+}
+
+proc record { args } {
+# Recording every operation makes tests ridiculously slow on
+# NT, so we are commenting this out; for debugging purposes,
+# it will undoubtedly be useful to uncomment this.
+# puts $args
+# flush stdout
+ return [eval $args]
+}
+
+proc newpair { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set a_keys($k) $data
+ lappend l_keys $k
+ incr nkeys
+}
+
+proc rempair { k } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ unset a_keys($k)
+ set n [lsearch $l_keys $k]
+ error_check_bad rempair:$k $n -1
+ set l_keys [lreplace $l_keys $n $n]
+ incr nkeys -1
+}
+
+proc changepair { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set a_keys($k) $data
+}
+
+proc changedup { k olddata newdata } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set d $a_keys($k)
+ error_check_bad changedup:$k [llength $d] 0
+
+ set n [lsearch $d $olddata]
+ error_check_bad changedup:$k $n -1
+
+ set a_keys($k) [lreplace $a_keys($k) $n $n $newdata]
+}
+
+# Insert a dup into the a_keys array with DB_KEYFIRST.
+proc adddup { k olddata newdata } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set d $a_keys($k)
+ if { [llength $d] == 0 } {
+ lappend l_keys $k
+ incr nkeys
+		set a_keys($k) [list $newdata]
+ }
+
+ set ndx 0
+
+	set d [linsert $d $ndx $newdata]
+ set a_keys($k) $d
+}
+
+proc remdup { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+	set d $a_keys($k)
+	error_check_bad remdup:$k [llength $d] 0
+
+	set n [lsearch $d $data]
+	error_check_bad remdup:$k $n -1
+
+ set a_keys($k) [lreplace $a_keys($k) $n $n]
+}
+
+proc dump_full_file { db txn outfile checkfunc start continue } {
+ source ./include.tcl
+
+ set outf [open $outfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good dbcursor [is_valid_cursor $c $db] TRUE
+
+ for {set d [$c get $start] } { [string length $d] != 0 } {
+ set d [$c get $continue] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ $checkfunc $k $d2
+ puts $outf "$k\t$d2"
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
+proc int_to_char { i } {
+ global alphabet
+
+ return [string index $alphabet $i]
+}
+
+proc dbcheck { key data } {
+ global l_keys
+ global a_keys
+ global nkeys
+ global check_array
+
+ if { [lsearch $l_keys $key] == -1 } {
+ error "FAIL: Key |$key| not in list of valid keys"
+ }
+
+ set d $a_keys($key)
+
+ if { [info exists check_array($key) ] } {
+ set check $check_array($key)
+ } else {
+ set check {}
+ }
+
+ if { [llength $d] > 1 } {
+ if { [llength $check] != [llength $d] } {
+ # Make the check array the right length
+ for { set i [llength $check] } { $i < [llength $d] } \
+ {incr i} {
+ lappend check 0
+ }
+ set check_array($key) $check
+ }
+
+ # Find this data's index
+ set ndx [lsearch $d $data]
+ if { $ndx == -1 } {
+ error "FAIL: \
+ Data |$data| not found for key $key. Found |$d|"
+ }
+
+ # Set the bit in the check array
+ set check_array($key) [lreplace $check_array($key) $ndx $ndx 1]
+ } elseif { [string compare $d $data] != 0 } {
+ error "FAIL: \
+ Invalid data |$data| for key |$key|. Expected |$d|."
+ } else {
+ set check_array($key) 1
+ }
+}
+
+# Dump out the file and verify it
+proc filecheck { file txn args} {
+ global check_array
+ global l_keys
+ global nkeys
+ global a_keys
+ source ./include.tcl
+
+ if { [info exists check_array] == 1 } {
+ unset check_array
+ }
+
+ eval open_and_dump_file $file NULL $file.dump dbcheck dump_full_file \
+ "-first" "-next" $args
+
+ # Check that everything we checked had all its data
+ foreach i [array names check_array] {
+ set count 0
+ foreach j $check_array($i) {
+ if { $j != 1 } {
+ puts -nonewline "Key |$i| never found datum"
+ puts " [lindex $a_keys($i) $count]"
+ }
+ incr count
+ }
+ }
+
+ # Check that all keys appeared in the checked array
+ set count 0
+ foreach k $l_keys {
+ if { [info exists check_array($k)] == 0 } {
+ puts "filecheck: key |$k| not found. Data: $a_keys($k)"
+ }
+ incr count
+ }
+
+ if { $count != $nkeys } {
+ puts "filecheck: Got $count keys; expected $nkeys"
+ }
+}
+
+proc cleanup { dir env { quiet 0 } } {
+ global gen_upgrade
+ global gen_dump
+ global is_qnx_test
+ global is_je_test
+ global old_encrypt
+ global passwd
+ source ./include.tcl
+
+ if { $gen_upgrade == 1 || $gen_dump == 1 } {
+ save_upgrade_files $dir
+ }
+
+# check_handles
+ set remfiles {}
+ set ret [catch { glob $dir/* } result]
+ if { $ret == 0 } {
+ foreach fileorig $result {
+ #
+ # We:
+ # - Ignore any env-related files, which are
+ # those that have __db.* or log.* if we are
+ # running in an env. Also ignore files whose
+ # names start with REPDIR_; these are replication
+ # subdirectories.
+ # - Call 'dbremove' on any databases.
+ # - Remove any remaining temp files.
+ #
+ switch -glob -- $fileorig {
+ */DIR_* -
+ */__db.* -
+ */log.* -
+ */*.jdb {
+ if { $env != "NULL" } {
+ continue
+ } else {
+ if { $is_qnx_test } {
+ catch {berkdb envremove -force \
+ -home $dir} r
+ }
+ lappend remfiles $fileorig
+ }
+ }
+ *.db {
+ set envargs ""
+ set encarg ""
+ #
+ # If we are in an env, the env handle is already
+ # open (with or without crypto), so we don't need
+ # a separate encryption argument here.
+ #
+ if { $env != "NULL"} {
+ set file [file tail $fileorig]
+ set envargs " -env $env "
+ if { [is_txnenv $env] } {
+ append envargs " -auto_commit "
+ }
+ } else {
+ if { $old_encrypt != 0 } {
+ set encarg "-encryptany $passwd"
+ }
+ set file $fileorig
+ }
+
+ # If a database is left in a corrupt
+ # state, dbremove might not be able to handle
+ # it (it does an open before the remove).
+ # Be prepared for this, and if necessary,
+ # just forcibly remove the file with a warning
+ # message.
+ set ret [catch \
+ {eval {berkdb dbremove} $envargs $encarg \
+ $file} res]
+ # If dbremove failed and we're not in an env,
+ # note that we don't have 100% certainty
+ # about whether the previous run used
+ # encryption. Try to remove with crypto if
+ # we tried without, and vice versa.
+ if { $ret != 0 } {
+ if { $env == "NULL" && \
+ $old_encrypt == 0} {
+ set ret [catch \
+ {eval {berkdb dbremove} \
+ -encryptany $passwd \
+ $file} res]
+ }
+ if { $env == "NULL" && \
+ $old_encrypt == 1 } {
+ set ret [catch \
+ {eval {berkdb dbremove} \
+ $file} res]
+ }
+ if { $ret != 0 } {
+ if { $quiet == 0 } {
+ puts \
+ "FAIL: dbremove in cleanup failed: $res"
+ }
+ set file $fileorig
+ lappend remfiles $file
+ }
+ }
+ }
+ default {
+ lappend remfiles $fileorig
+ }
+ }
+ }
+ if {[llength $remfiles] > 0} {
+ #
+ # In the HFS file system there are cases where not
+ # all files are removed on the first attempt. If
+ # it fails, try again a few times.
+ #
+ # This bug has been compensated for in Tcl with a fix
+ # checked into Tcl 8.4. When Berkeley DB requires
+ # Tcl 8.5, we can remove this while loop and replace
+ # it with a simple 'fileremove -f $remfiles'.
+ #
+ set count 0
+ while { [catch {eval fileremove -f $remfiles}] == 1 \
+ && $count < 5 } {
+ incr count
+ }
+ }
+
+ if { $is_je_test } {
+ set rval [catch {eval {exec \
+ $util_path/db_dump} -h $dir -l } res]
+ if { $rval == 0 } {
+ set envargs " -env $env "
+ if { [is_txnenv $env] } {
+ append envargs " -auto_commit "
+ }
+
+ foreach db $res {
+ set ret [catch {eval \
+ {berkdb dbremove} $envargs $db } res]
+ }
+ }
+ }
+ }
+}
+
+proc log_cleanup { dir } {
+ source ./include.tcl
+ global gen_upgrade_log
+
+ if { $gen_upgrade_log == 1 } {
+ save_upgrade_files $dir
+ }
+
+ set files [glob -nocomplain $dir/log.*]
+ if { [llength $files] != 0} {
+ foreach f $files {
+ fileremove -f $f
+ }
+ }
+}
+
+proc env_cleanup { dir } {
+ global old_encrypt
+ global passwd
+ source ./include.tcl
+
+ set encarg ""
+ if { $old_encrypt != 0 } {
+ set encarg "-encryptany $passwd"
+ }
+ set stat [catch {eval {berkdb envremove -home} $dir $encarg} ret]
+ #
+ # If something failed and we are left with a region entry
+ # in /dev/shmem that is zero-length, the envremove will
+ # succeed, and the shm_unlink will succeed, but it will not
+ # remove the zero-length entry from /dev/shmem. Remove it
+ # using fileremove or else all other tests using an env
+ # will immediately fail.
+ #
+ if { $is_qnx_test == 1 } {
+ set region_files [glob -nocomplain /dev/shmem/$dir*]
+ if { [llength $region_files] != 0 } {
+ foreach f $region_files {
+ fileremove -f $f
+ }
+ }
+ }
+ log_cleanup $dir
+ cleanup $dir NULL
+}
+
+# Start an RPC server. Don't return to caller until the
+# server is up. Wait up to $maxwait seconds.
+proc rpc_server_start { { encrypted 0 } { maxwait 30 } { args "" } } {
+ source ./include.tcl
+ global rpc_svc
+ global passwd
+
+ set encargs ""
+ # Set -v for verbose messages from the RPC server.
+ # set encargs " -v "
+
+ if { $encrypted == 1 } {
+ set encargs " -P $passwd "
+ }
+
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [eval {exec $util_path/$rpc_svc \
+ -h $rpc_testdir} $args $encargs &]
+ } else {
+ set dpid [eval {exec rsh $rpc_server \
+ $rpc_path/$rpc_svc -h $rpc_testdir $args} &]
+ }
+
+ # Wait a couple of seconds before we start looking for
+ # the server.
+ tclsleep 2
+ set home [file tail $rpc_testdir]
+ if { $encrypted == 1 } {
+ set encargs " -encryptaes $passwd "
+ }
+ for { set i 0 } { $i < $maxwait } { incr i } {
+ # Try an operation -- while it fails with NOSERVER, sleep for
+ # a second and retry.
+ if {[catch {berkdb envremove -force -home "$home.FAIL" \
+ -server $rpc_server} res] && \
+ [is_substr $res DB_NOSERVER:]} {
+ tclsleep 1
+ } else {
+ # Server is up, clean up and return to caller
+ break
+ }
+ }
+ # If we never broke out of the loop, the server never came up.
+ if { $i >= $maxwait } {
+ puts "FAIL: RPC server\
+ not started after $maxwait seconds"
+ }
+ return $dpid
+}
+
+proc remote_cleanup { server dir localdir } {
+ set home [file tail $dir]
+ error_check_good cleanup:remove [berkdb envremove -home $home \
+ -server $server] 0
+ catch {exec rsh $server rm -f $dir/*} ret
+ cleanup $localdir NULL
+}
+
+proc help { cmd } {
+ if { [info command $cmd] == $cmd } {
+ set is_proc [lsearch [info procs $cmd] $cmd]
+ if { $is_proc == -1 } {
+ # Not a procedure; must be a C command
+ # Let's hope that it takes some parameters
+ # and that it prints out a message
+ puts "Usage: [eval $cmd]"
+ } else {
+ # It is a tcl procedure
+ puts -nonewline "Usage: $cmd"
+ set args [info args $cmd]
+ foreach a $args {
+ set is_def [info default $cmd $a val]
+ if { $is_def != 0 } {
+ # Default value
+ puts -nonewline " $a=$val"
+ } elseif {$a == "args"} {
+ # Print out flag values
+ puts " options"
+ args
+ } else {
+ # No default value
+ puts -nonewline " $a"
+ }
+ }
+ puts ""
+ }
+ } else {
+ puts "$cmd is not a command"
+ }
+}
+
+# Run a recovery test for a particular operation.
+# Note that we catch the return from cp and ignore it; Solaris cp seems to
+# exit non-zero on occasion, even though everything else runs just fine.
+#
+# The work is split into two procs (op_recover_prep and op_recover_rec) so
+# that the preparation and the command can be executed in a different
+# process from the recovery.
+#
+proc op_codeparse { encodedop op } {
+ set op1 ""
+ set op2 ""
+ switch $encodedop {
+ "abort" {
+ set op1 $encodedop
+ set op2 ""
+ }
+ "commit" {
+ set op1 $encodedop
+ set op2 ""
+ }
+ "prepare-abort" {
+ set op1 "prepare"
+ set op2 "abort"
+ }
+ "prepare-commit" {
+ set op1 "prepare"
+ set op2 "commit"
+ }
+ "prepare-discard" {
+ set op1 "prepare"
+ set op2 "discard"
+ }
+ }
+
+ if { $op == "op" } {
+ return $op1
+ } else {
+ return $op2
+ }
+}
+
+proc op_recover { encodedop dir env_cmd dbfile cmd msg args} {
+ source ./include.tcl
+
+ set op [op_codeparse $encodedop "op"]
+ set op2 [op_codeparse $encodedop "sub"]
+ puts "\t$msg $encodedop"
+ set gidf ""
+ # puts "op_recover: $op $dir $env_cmd $dbfile $cmd $args"
+ if { $op == "prepare" } {
+ sentinel_init
+
+ # Fork off a child to run the cmd
+ # We append the gid, so start here making sure
+ # we don't have old gid's around.
+ set outfile $testdir/childlog
+ fileremove -f $testdir/gidfile
+ set gidf $testdir/gidfile
+ set pidlist {}
+ # puts "$tclsh_path $test_path/recdscript.tcl $testdir/recdout \
+ # $op $dir $env_cmd $dbfile $gidf $cmd"
+ set p [exec $tclsh_path $test_path/wrap.tcl recdscript.tcl \
+ $testdir/recdout $op $dir $env_cmd $dbfile $gidf $cmd $args &]
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/recdout r]
+ set r [read $f1]
+ puts -nonewline $r
+ close $f1
+ fileremove -f $testdir/recdout
+ } else {
+ eval {op_recover_prep $op $dir $env_cmd $dbfile $gidf $cmd} $args
+ }
+ eval {op_recover_rec $op $op2 $dir $env_cmd $dbfile $gidf} $args
+}
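+
+# A sketch of a typical call, as made by the recd tests (the key and data
+# variables here are illustrative):
+#
+#    set cmd [list DB put -txn TXNID $key $data]
+#    op_recover prepare-commit $testdir $env_cmd $testfile $cmd $msg
+#
+# op_recover_prep replaces the ENV, TXNID, DB and DBC placeholder tokens
+# in the command with live handles before eval'ing it.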
+
+proc op_recover_prep { op dir env_cmd dbfile gidf cmd args} {
+ global log_log_record_types
+ global recd_debug
+ global recd_id
+ global recd_op
+ source ./include.tcl
+
+ # puts "op_recover_prep: $op $dir $env_cmd $dbfile $cmd $args"
+
+ set init_file $dir/t1
+ set afterop_file $dir/t2
+ set final_file $dir/t3
+
+ set db_cursor ""
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ # Save the initial file and open the environment and the file
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.init } res
+ copy_extent_file $dir $dbfile init
+
+ convert_encrypt $env_cmd
+ set env [eval $env_cmd]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ eval set args $args
+ set db [eval {berkdb open -auto_commit -env $env} $args {$dbfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Dump out file contents for initial case
+ eval open_and_dump_file $dbfile $env $init_file nop \
+ dump_file_direction "-first" "-next" $args
+
+ set t [$env txn]
+ error_check_bad txn_begin $t NULL
+ error_check_good txn_begin [is_substr $t "txn"] 1
+
+ # Now fill in the db, tmgr, and the txnid in the command
+ set exec_cmd $cmd
+
+ set items [lsearch -all $cmd ENV]
+ foreach i $items {
+ set exec_cmd [lreplace $exec_cmd $i $i $env]
+ }
+
+ set items [lsearch -all $cmd TXNID]
+ foreach i $items {
+ set exec_cmd [lreplace $exec_cmd $i $i $t]
+ }
+
+ set items [lsearch -all $cmd DB]
+ foreach i $items {
+ set exec_cmd [lreplace $exec_cmd $i $i $db]
+ }
+
+ set i [lsearch $cmd DBC]
+ if { $i != -1 } {
+ set db_cursor [$db cursor -txn $t]
+ $db_cursor get -first
+ }
+ set adjust 0
+ set items [lsearch -all $cmd DBC]
+ foreach i $items {
+ # make sure the cursor is pointing to something.
+ set exec_cmd [lreplace $exec_cmd \
+ [expr $i + $adjust] [expr $i + $adjust] $db_cursor]
+ set txn_pos [lsearch $exec_cmd -txn]
+ if { $txn_pos != -1} {
+ # Strip out the txn parameter, we've applied it to the
+ # cursor.
+ set exec_cmd \
+ [lreplace $exec_cmd $txn_pos [expr $txn_pos + 1]]
+ # Now the offsets in the items list are out-of-whack,
+ # keep track of how far.
+ set adjust [expr $adjust - 2]
+ }
+ }
+
+ # To test DB_CONSUME, we need to expect a record return, not "0".
+ set i [lsearch $exec_cmd "-consume"]
+ if { $i != -1 } {
+ set record_exec_cmd_ret 1
+ } else {
+ set record_exec_cmd_ret 0
+ }
+
+ # For the DB_APPEND test, we need to expect a return other than
+ # 0; set this flag to be more lenient in the error_check_good.
+ set i [lsearch $exec_cmd "-append"]
+ if { $i != -1 } {
+ set lenient_exec_cmd_ret 1
+ } else {
+ set lenient_exec_cmd_ret 0
+ }
+
+ # For some partial tests we want to execute multiple commands.
+ # Pull them out here.
+ set last 0
+ set exec_cmd2 ""
+ set exec_cmds [list]
+ set items [lsearch -all $exec_cmd NEW_CMD]
+ foreach i $items {
+ if { $last == 0 } {
+ set exec_cmd2 [lrange $exec_cmd 0 [expr $i - 1]]
+ } else {
+ lappend exec_cmds [lrange $exec_cmd \
+ [expr $last + 1] [expr $i - 1]]
+ }
+ set last $i
+ }
+ if { $last != 0 } {
+ lappend exec_cmds [lrange $exec_cmd [expr $last + 1] end]
+ set exec_cmd $exec_cmd2
+ }
+ #puts "exec_cmd: $exec_cmd"
+ #puts "exec_cmds: $exec_cmds"
+
+ # Execute command and commit/abort it.
+ set ret [eval $exec_cmd]
+ if { $record_exec_cmd_ret == 1 } {
+ error_check_good "\"$exec_cmd\"" [llength [lindex $ret 0]] 2
+ } elseif { $lenient_exec_cmd_ret == 1 } {
+ error_check_good "\"$exec_cmd\"" [expr $ret > 0] 1
+ } else {
+ error_check_good "\"$exec_cmd\"" $ret 0
+ }
+ # If there are additional commands, run them.
+ foreach curr_cmd $exec_cmds {
+ set ret [eval $curr_cmd]
+ error_check_good "\"$curr_cmd\"" $ret 0
+ }
+
+ # If a cursor was created, close it now.
+ if {$db_cursor != ""} {
+ error_check_good close:$db_cursor [$db_cursor close] 0
+ }
+
+ set record_exec_cmd_ret 0
+ set lenient_exec_cmd_ret 0
+
+ # Sync the file so that we can capture a snapshot to test recovery.
+ error_check_good sync:$db [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+ eval open_and_dump_file $dir/$dbfile.afterop NULL \
+ $afterop_file nop dump_file_direction "-first" "-next" $args
+
+ #puts "\t\t\tExecuting txn_$op:$t"
+ if { $op == "prepare" } {
+ set gid [make_gid global:$t]
+ set gfd [open $gidf w+]
+ puts $gfd $gid
+ close $gfd
+ error_check_good txn_$op:$t [$t $op $gid] 0
+ } else {
+ error_check_good txn_$op:$t [$t $op] 0
+ }
+
+ switch $op {
+ "commit" { puts "\t\tCommand executed and committed." }
+ "abort" { puts "\t\tCommand executed and aborted." }
+ "prepare" { puts "\t\tCommand executed and prepared." }
+ }
+
+ # Sync the file so that we can capture a snapshot to test recovery.
+ error_check_good sync:$db [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.final } res
+ copy_extent_file $dir $dbfile final
+ eval open_and_dump_file $dir/$dbfile.final NULL \
+ $final_file nop dump_file_direction "-first" "-next" $args
+
+ # If this is an abort or prepare-abort, it should match the
+ # original file.
+ # If this was a commit or prepare-commit, then this file should
+ # match the afterop file.
+ # If this was a prepare without an abort or commit, we still
+ # have transactions active, and peering at the database from
+ # another environment will show data from uncommitted transactions.
+ # Thus we just skip this in the prepare-only case; what
+ # we care about are the results of a prepare followed by a
+ # recovery, which we test later.
+ if { $op == "commit" } {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } elseif { $op == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ # Make sure this really is one of the prepare tests
+ error_check_good assert:prepare-test $op "prepare"
+ }
+
+ # Running recovery on this database should not do anything.
+ # Flush all data to disk, close the environment and save the
+ # file.
+ # XXX DO NOT CLOSE FILE ON PREPARE -- if you are prepared,
+ # you really have an active transaction and you're not allowed
+ # to close files that are being acted upon by in-process
+ # transactions.
+ if { $op != "prepare" } {
+ error_check_good close:$db [$db close] 0
+ }
+
+ #
+ # If we are running 'prepare' don't close the env with an
+ # active transaction. Leave it alone so the close won't
+ # quietly abort it on us.
+ if { [is_substr $op "prepare"] != 1 } {
+ error_check_good log_flush [$env log_flush] 0
+ error_check_good envclose [$env close] 0
+ }
+ return
+}
+
+proc op_recover_rec { op op2 dir env_cmd dbfile gidf args} {
+ global log_log_record_types
+ global recd_debug
+ global recd_id
+ global recd_op
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ #puts "op_recover_rec: $op $op2 $dir $env_cmd $dbfile $gidf"
+
+ set init_file $dir/t1
+ set afterop_file $dir/t2
+ set final_file $dir/t3
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ berkdb debug_check
+ puts -nonewline "\t\top_recover_rec: Running recovery ... "
+ flush stdout
+
+ set recargs "-h $dir -c "
+ if { $encrypt > 0 } {
+ append recargs " -P $passwd "
+ }
+ set stat [catch {eval exec $util_path/db_recover -e $recargs} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+ puts -nonewline "complete ... "
+
+ #
+ # We cannot run db_recover here because that will open an env, run
+ # recovery, then close it, which will abort the outstanding txns.
+ # We want to do it ourselves.
+ #
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ if {[is_partition_callback $args] == 1 } {
+ set nodump 1
+ } else {
+ set nodump 0
+ }
+ error_check_good db_verify [verify_dir $testdir "\t\t" 0 1 $nodump] 0
+ puts "verified"
+
+ # If we left a txn prepared, but not aborted or committed,
+ # we need to do a txn_recover. Make sure we get back the
+ # number of txns we expect.
+ if { $op == "prepare"} {
+ set txns [$env txn_recover]
+ error_check_bad txnrecover [llength $txns] 0
+ set gfd [open $gidf r]
+ set origgid [read -nonewline $gfd]
+ close $gfd
+ set txnlist [lindex $txns 0]
+ set t [lindex $txnlist 0]
+ set gid [lindex $txnlist 1]
+ error_check_good gidcompare $gid $origgid
+ puts "\t\t\tExecuting txn_$op2:$t"
+ error_check_good txn_$op2:$t [$t $op2] 0
+ #
+ # If we are testing discard, we do need to resolve
+ # the txn, so get the list again and now abort it.
+ #
+ if { $op2 == "discard" } {
+ set txns [$env txn_recover]
+ error_check_bad txnrecover [llength $txns] 0
+ set txnlist [lindex $txns 0]
+ set t [lindex $txnlist 0]
+ set gid [lindex $txnlist 1]
+ error_check_good gidcompare $gid $origgid
+ puts "\t\t\tExecuting txn_abort:$t"
+ error_check_good disc_txn_abort:$t [$t abort] 0
+ }
+ }
+
+
+ eval set args $args
+ eval open_and_dump_file $dir/$dbfile NULL $final_file nop \
+ dump_file_direction "-first" "-next" $args
+ if { $op == "commit" || $op2 == "commit" } {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } else {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ }
+
+ # Now close the environment, substitute a file that will need
+ # recovery and try running recovery again.
+ reset_env $env
+ if { $op == "commit" || $op2 == "commit" } {
+ catch { file copy -force $dir/$dbfile.init $dir/$dbfile } res
+ move_file_extent $dir $dbfile init copy
+ } else {
+ catch { file copy -force $dir/$dbfile.afterop $dir/$dbfile } res
+ move_file_extent $dir $dbfile afterop copy
+ }
+
+ berkdb debug_check
+ puts -nonewline "\t\tRunning recovery on pre-op database ... "
+ flush stdout
+
+ set stat [catch {eval exec $util_path/db_recover $recargs} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+ puts -nonewline "complete ... "
+
+ error_check_good db_verify_preop \
+ [verify_dir $testdir "\t\t" 0 1 $nodump] 0
+
+ puts "verified"
+
+ set env [eval $env_cmd]
+
+ eval open_and_dump_file $dir/$dbfile NULL $final_file nop \
+ dump_file_direction "-first" "-next" $args
+ if { $op == "commit" || $op2 == "commit" } {
+ filesort $final_file $final_file.sort
+ filesort $afterop_file $afterop_file.sort
+ error_check_good \
+ diff(post-$op,recovered):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } else {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ }
+
+ # This should just close the environment, not blow it away.
+ reset_env $env
+}
+
+proc populate { db method txn n dups bigdata } {
+ source ./include.tcl
+
+ # Handle non-transactional cases, too.
+ set t ""
+ if { [llength $txn] > 0 } {
+ set t " -txn $txn "
+ }
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $n } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } elseif { $dups == 1 } {
+ set key duplicate_key
+ } else {
+ set key $str
+ }
+ if { $bigdata == 1 && [berkdb random_int 1 3] == 1} {
+ set str [replicate $str 1000]
+ }
+
+ set ret [eval {$db put} $t {$key [chop_data $method $str]}]
+ error_check_good db_put:$key $ret 0
+ incr count
+ }
+ close $did
+ return 0
+}
+
+proc big_populate { db txn n } {
+ source ./include.tcl
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $n } {
+ set key [replicate $str 50]
+ set ret [$db put -txn $txn $key $str]
+ error_check_good db_put:$key $ret 0
+ incr count
+ }
+ close $did
+ return 0
+}
+
+proc unpopulate { db txn num } {
+ source ./include.tcl
+
+ set c [eval {$db cursor} "-txn $txn"]
+ error_check_bad $db:cursor $c NULL
+ error_check_good $db:cursor [is_substr $c $db] 1
+
+ set i 0
+ for {set d [$c get -first] } { [llength $d] != 0 } {
+ set d [$c get -next] } {
+ $c del
+ incr i
+ if { $num != 0 && $i >= $num } {
+ break
+ }
+ }
+ error_check_good cursor_close [$c close] 0
+ return 0
+}
+
+# Flush logs for txn envs only.
+proc reset_env { env } {
+ if { [is_txnenv $env] } {
+ error_check_good log_flush [$env log_flush] 0
+ }
+ error_check_good env_close [$env close] 0
+}
+
+proc maxlocks { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc maxwrites { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc minlocks { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc minwrites { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc countlocks { myenv locker_id obj_id num } {
+ set locklist ""
+ for { set i 0} {$i < [expr $obj_id * 4]} { incr i } {
+ set r [catch {$myenv lock_get read $locker_id \
+ [expr $obj_id * 1000 + $i]} l ]
+ if { $r != 0 } {
+ puts $l
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $l $myenv] 1
+ lappend locklist $l
+ }
+ }
+
+ # Now acquire one write lock, except for obj_id 1, which doesn't
+ # acquire any. We'll use obj_id 1 to test minwrites.
+ if { $obj_id != 1 } {
+ set r [catch {$myenv lock_get write $locker_id \
+ [expr $obj_id * 1000 + 10]} l ]
+ if { $r != 0 } {
+ puts $l
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $l $myenv] 1
+ lappend locklist $l
+ }
+ }
+
+ # Get one extra write lock for obj_id 2. We'll use
+ # obj_id 2 to test maxwrites.
+ #
+ if { $obj_id == 2 } {
+ set extra [catch {$myenv lock_get write \
+ $locker_id [expr $obj_id * 1000 + 11]} l ]
+ if { $extra != 0 } {
+ puts $l
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $l $myenv] 1
+ lappend locklist $l
+ }
+ }
+
+ set ret [ring $myenv $locker_id $obj_id $num]
+
+ foreach l $locklist {
+ error_check_good lockput:$l [$l put] 0
+ }
+
+ return $ret
+}
+
+# This routine will let us obtain a ring of deadlocks.
+# Each locker will get a lock on obj_id, then sleep, and
+# then try to lock (obj_id + 1) % num.
+# When the lock is finally granted, we release our locks and
+# return 1 if we got both locks and DEADLOCK if we deadlocked.
+# The results here should be that 1 locker deadlocks and the
+# rest all finish successfully.
+proc ring { myenv locker_id obj_id num } {
+ source ./include.tcl
+
+ if {[catch {$myenv lock_get write $locker_id $obj_id} lock1] != 0} {
+ puts $lock1
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $lock1 $myenv] 1
+ }
+
+ tclsleep 30
+ set nextobj [expr ($obj_id + 1) % $num]
+ set ret 1
+ if {[catch {$myenv lock_get write $locker_id $nextobj} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ if {[string match "*NOTGRANTED*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ puts $lock2
+ set ret ERROR
+ }
+ }
+ } else {
+ error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_bad lockget:$obj_id $lock2 NULL
+ error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+ return $ret
+}
+
+# This routine will create massive deadlocks.
+# Each locker will get a readlock on obj_id, then sleep, and
+# then try to upgrade the readlock to a write lock.
+# When the lock is finally granted, we release our first lock and
+# return 1 if we got both locks and DEADLOCK if we deadlocked.
+# The results here should be that 1 locker succeeds in getting all
+# the locks and everyone else deadlocks.
+proc clump { myenv locker_id obj_id num } {
+ source ./include.tcl
+
+ set obj_id 10
+ if {[catch {$myenv lock_get read $locker_id $obj_id} lock1] != 0} {
+ puts $lock1
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id \
+ [is_valid_lock $lock1 $myenv] TRUE
+ }
+
+ tclsleep 30
+ set ret 1
+ if {[catch {$myenv lock_get write $locker_id $obj_id} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ if {[string match "*NOTGRANTED*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ puts $lock2
+ set ret ERROR
+ }
+ }
+ } else {
+ error_check_good \
+ lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_good \
+ lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+ return $ret
+}
+
+proc dead_check { t procs timeout dead clean other } {
+ error_check_good $t:$procs:other $other 0
+ switch $t {
+ ring {
+ # With timeouts the number of deadlocks is
+ # unpredictable: test for at least one deadlock.
+ if { $timeout != 0 && $dead > 1 } {
+ set clean [ expr $clean + $dead - 1]
+ set dead 1
+ }
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ clump {
+ # With timeouts the number of deadlocks is
+ # unpredictable: test for no more than one
+ # successful lock.
+ if { $timeout != 0 && $dead == $procs } {
+ set clean 1
+ set dead [expr $procs - 1]
+ }
+ error_check_good $t:$procs:deadlocks $dead \
+ [expr $procs - 1]
+ error_check_good $t:$procs:success $clean 1
+ }
+ oldyoung {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ maxlocks {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ maxwrites {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ minlocks {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ minwrites {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ default {
+ error "Test $t not implemented"
+ }
+ }
+}
+
+proc rdebug { id op where } {
+ global recd_debug
+ global recd_id
+ global recd_op
+
+ set recd_debug $where
+ set recd_id $id
+ set recd_op $op
+}
+
+proc rtag { msg id } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { $id == $tag } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc zero_list { n } {
+ set ret ""
+ while { $n > 0 } {
+ lappend ret 0
+ incr n -1
+ }
+ return $ret
+}
+
+proc check_dump { k d } {
+ puts "key: $k data: $d"
+}
+
+proc reverse { s } {
+ set res ""
+ for { set i 0 } { $i < [string length $s] } { incr i } {
+ set res "[string index $s $i]$res"
+ }
+
+ return $res
+}
+
+#
+# This is an internal-only proc. All tests should use 'is_valid_db' etc.
+#
+proc is_valid_widget { w expected } {
+ # First N characters must match "expected"
+ set l [string length $expected]
+ incr l -1
+ if { [string compare [string range $w 0 $l] $expected] != 0 } {
+ return $w
+ }
+
+ # Remaining characters must be digits
+ incr l 1
+ for { set i $l } { $i < [string length $w] } { incr i} {
+ set c [string index $w $i]
+ if { $c < "0" || $c > "9" } {
+ return $w
+ }
+ }
+
+ return TRUE
+}
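+
+# Typical assertions built on this helper elsewhere in the suite look like
+# the following (handles illustrative):
+#
+#    error_check_good dbopen [is_valid_db $db] TRUE
+#    error_check_good cursor [is_valid_cursor $dbc $db] TRUE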
+
+proc is_valid_db { db } {
+ return [is_valid_widget $db db]
+}
+
+proc is_valid_env { env } {
+ return [is_valid_widget $env env]
+}
+
+proc is_valid_cursor { dbc db } {
+ return [is_valid_widget $dbc $db.c]
+}
+
+proc is_valid_lock { lock env } {
+ return [is_valid_widget $lock $env.lock]
+}
+
+proc is_valid_logc { logc env } {
+ return [is_valid_widget $logc $env.logc]
+}
+
+proc is_valid_mpool { mpool env } {
+ return [is_valid_widget $mpool $env.mp]
+}
+
+proc is_valid_page { page mpool } {
+ return [is_valid_widget $page $mpool.pg]
+}
+
+proc is_valid_txn { txn env } {
+ return [is_valid_widget $txn $env.txn]
+}
+
+proc is_valid_lock {l env} {
+ return [is_valid_widget $l $env.lock]
+}
+
+proc is_valid_locker {l } {
+ return [is_valid_widget $l ""]
+}
+
+proc is_valid_seq { seq } {
+ return [is_valid_widget $seq seq]
+}
+
+proc send_cmd { fd cmd {sleep 2}} {
+ source ./include.tcl
+
+ puts $fd "if \[catch {set v \[$cmd\] ; puts \$v} ret\] { \
+ puts \"FAIL: \$ret\" \
+ }"
+ puts $fd "flush stdout"
+ flush $fd
+ berkdb debug_check
+ tclsleep $sleep
+
+ set r [rcv_result $fd]
+ return $r
+}
+
+proc rcv_result { fd } {
+ global errorInfo
+
+ set r [gets $fd result]
+ if { $r == -1 } {
+ puts "FAIL: gets returned -1 (EOF)"
+ puts "FAIL: errorInfo is $errorInfo"
+ }
+
+ return $result
+}
+
+proc send_timed_cmd { fd rcv_too cmd } {
+ set c1 "set start \[timestamp -r\]; "
+ set c2 "puts \[expr \[timestamp -r\] - \$start\]"
+ set full_cmd [concat $c1 $cmd ";" $c2]
+
+ puts $fd $full_cmd
+ puts $fd "flush stdout"
+ flush $fd
+ return 0
+}
+
+#
+# The rationale behind why we have *two* "data padding" routines is outlined
+# below:
+#
+# Both pad_data and chop_data truncate data that is too long. However,
+# pad_data also adds the pad character to pad data out to the fixed length
+# record length.
+#
+# Which routine you call does not depend on the length of the data you're
+# using, but on whether you're doing a put or a get. When we do a put, we
+# have to make sure the data isn't longer than the size of a record because
+# otherwise we'll get an error (use chop_data). When we do a get, we want to
+# check that db padded everything correctly (use pad_data on the value against
+# which we are comparing).
+#
+# We don't want to just use the pad_data routine for both purposes, because
+# we want to be able to test whether or not db is padding correctly. For
+# example, the queue access method had a bug where when a record was
+# overwritten (*not* a partial put), only the first n bytes of the new entry
+# were written, n being the new entry's (unpadded) length. So, if we did
+# a put with key,value pair (1, "abcdef") and then a put (1, "z"), we'd get
+# back (1,"zbcdef"). If we had used pad_data instead of chop_data, we would
+# have gotten the "correct" result, but we wouldn't have found this bug.
+proc chop_data {method data} {
+ global fixed_len
+
+ if {[is_fixed_length $method] == 1 && \
+ [string length $data] > $fixed_len} {
+ return [eval {binary format a$fixed_len $data}]
+ } else {
+ return $data
+ }
+}
+
+proc pad_data {method data} {
+ global fixed_len
+
+ if {[is_fixed_length $method] == 1} {
+ return [eval {binary format a$fixed_len $data}]
+ } else {
+ return $data
+ }
+}
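+
+# A small sketch of the put/get asymmetry described above ($key and $data
+# are illustrative):
+#
+#    # On a put, truncate anything longer than a fixed-length record:
+#    error_check_good put:$key \
+#        [$db put $key [chop_data $method $data]] 0
+#    # On a get, compare against the padded form of what was stored:
+#    set ret [lindex [lindex [$db get $key] 0] 1]
+#    error_check_good get:$key $ret [pad_data $method $data]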
+
+#
+# The make_fixed_length proc is used in special circumstances where we
+# absolutely need to send in data that is already padded out to the fixed
+# length with a known pad character. Most tests should use chop_data and
+# pad_data, not this.
+#
+proc make_fixed_length {method data {pad 0}} {
+ global fixed_len
+
+ if {[is_fixed_length $method] == 1} {
+ set data [chop_data $method $data]
+ while { [string length $data] < $fixed_len } {
+ set data [format $data%c $pad]
+ }
+ }
+ return $data
+}
+
+proc make_gid {data} {
+ while { [string length $data] < 128 } {
+ set data [format ${data}0]
+ }
+ return $data
+}
+
+# Shift data for a partial put/get test,
+# padding with the fixed-length pad character (which is NUL).
+proc partial_shift { data offset direction} {
+ global fixed_len
+
+ set len [expr $fixed_len - 1]
+
+ if { [string compare $direction "right"] == 0 } {
+ for { set i 1} { $i <= $offset } {incr i} {
+ set data [binary format x1a$len $data]
+ }
+ } elseif { [string compare $direction "left"] == 0 } {
+ for { set i 1} { $i <= $offset } {incr i} {
+ set data [string range $data 1 end]
+ set data [binary format a$len $data]
+ }
+ }
+ return $data
+}
+
+# "string compare" does not always work to compare this data,
+# nor does expr (==); this is a specialized comparison routine
+# for use with fixed-length recno and queue data.
+proc binary_compare { data1 data2 } {
+ if { [string length $data1] != [string length $data2] || \
+ [string compare -length \
+ [string length $data1] $data1 $data2] != 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+# This is a comparison function used with the lsort command.
+# It treats its inputs as 32 bit signed integers for comparison,
+# and is coded to work with both 32 bit and 64 bit versions of tclsh.
+proc int32_compare { val1 val2 } {
+ # Big is set to 2^32 on a 64 bit machine, or 0 on 32 bit machine.
+ set big [expr 0xffffffff + 1]
+ if { $val1 >= 0x80000000 } {
+ set val1 [expr $val1 - $big]
+ }
+ if { $val2 >= 0x80000000 } {
+ set val2 [expr $val2 - $big]
+ }
+ return [expr $val1 - $val2]
+}
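+
+# Typically used as an lsort comparison command, e.g. (illustrative):
+#
+#    set sorted [lsort -command int32_compare $recnolist]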
+
+proc convert_method { method } {
+ switch -- $method {
+ -btree -
+ -dbtree -
+ dbtree -
+ -ddbtree -
+ ddbtree -
+ -rbtree -
+ BTREE -
+ DB_BTREE -
+ DB_RBTREE -
+ RBTREE -
+ bt -
+ btree -
+ db_btree -
+ db_rbtree -
+ rbt -
+ rbtree { return "-btree" }
+
+ -dhash -
+ -ddhash -
+ -hash -
+ DB_HASH -
+ HASH -
+ dhash -
+ ddhash -
+ db_hash -
+ h -
+ hash { return "-hash" }
+
+ -queue -
+ DB_QUEUE -
+ QUEUE -
+ db_queue -
+ q -
+ qam -
+ queue -
+ -iqueue -
+ DB_IQUEUE -
+ IQUEUE -
+ db_iqueue -
+ iq -
+ iqam -
+ iqueue { return "-queue" }
+
+ -queueextent -
+ QUEUEEXTENT -
+ qe -
+ qamext -
+ -queueext -
+ queueextent -
+ queueext -
+ -iqueueextent -
+ IQUEUEEXTENT -
+ iqe -
+ iqamext -
+ -iqueueext -
+ iqueueextent -
+ iqueueext { return "-queue" }
+
+ -frecno -
+ -recno -
+ -rrecno -
+ DB_FRECNO -
+ DB_RECNO -
+ DB_RRECNO -
+ FRECNO -
+ RECNO -
+ RRECNO -
+ db_frecno -
+ db_recno -
+ db_rrecno -
+ frec -
+ frecno -
+ rec -
+ recno -
+ rrec -
+ rrecno { return "-recno" }
+
+ default { error "FAIL:[timestamp] $method: unknown method" }
+ }
+}
+
+proc split_partition_args { largs } {
+
+ # First check for -partition_callback, in which case we
+ # need to remove three args.
+ set index [lsearch $largs "-partition_callback"]
+ if { $index == -1 } {
+ set newl $largs
+ } else {
+ set end [expr $index + 2]
+ set newl [lreplace $largs $index $end]
+ }
+
+ # Then check for -partition, and remove two args.
+ set index [lsearch $newl "-partition"]
+ if { $index > -1 } {
+ set end [expr $index + 1]
+ set newl [lreplace $newl $index $end]
+ }
+
+ return $newl
+}
+
+# Strip "-compress" out of a string of args.
+proc strip_compression_args { largs } {
+
+ set cindex [lsearch $largs "-compress"]
+ if { $cindex == -1 } {
+ set newargs $largs
+ } else {
+ set newargs [lreplace $largs $cindex $cindex]
+ }
+ return $newargs
+}
+
+proc split_encargs { largs encargsp } {
+ global encrypt
+ upvar $encargsp e
+ set eindex [lsearch $largs "-encrypta*"]
+ if { $eindex == -1 } {
+ set e ""
+ set newl $largs
+ } else {
+ set eend [expr $eindex + 1]
+ set e [lrange $largs $eindex $eend]
+ set newl [lreplace $largs $eindex $eend "-encrypt"]
+ }
+ return $newl
+}
+
+proc split_pageargs { largs pageargsp } {
+ upvar $pageargsp e
+ set eindex [lsearch $largs "-pagesize"]
+ if { $eindex == -1 } {
+ set e ""
+ set newl $largs
+ } else {
+ set eend [expr $eindex + 1]
+ set e [lrange $largs $eindex $eend]
+ set newl [lreplace $largs $eindex $eend ""]
+ }
+ return $newl
+}
+
+proc convert_encrypt { largs } {
+ global encrypt
+ global old_encrypt
+
+ set old_encrypt $encrypt
+ set encrypt 0
+ if { [lsearch $largs "-encrypt*"] != -1 } {
+ set encrypt 1
+ }
+}
+
+# If recno-with-renumbering or btree-with-record-numbers is specified, then
+# fix the arguments to specify the DB_RENUMBER/DB_RECNUM option for the
+# -flags argument.
+proc convert_args { method {largs ""} } {
+ global fixed_len
+ global gen_upgrade
+ global upgrade_be
+ source ./include.tcl
+
+ if { [string first - $largs] == -1 &&\
+ [string compare $largs ""] != 0 &&\
+ [string compare $largs {{}}] != 0 } {
+ set errstring "args must contain a hyphen; does this test\
+ have no numeric args?"
+ puts "FAIL:[timestamp] $errstring (largs was $largs)"
+ return -code return
+ }
+
+ convert_encrypt $largs
+ if { $gen_upgrade == 1 && $upgrade_be == 1 } {
+ append largs " -lorder 4321 "
+ } elseif { $gen_upgrade == 1 && $upgrade_be != 1 } {
+ append largs " -lorder 1234 "
+ }
+
+ if { [is_rrecno $method] == 1 } {
+ append largs " -renumber "
+ } elseif { [is_rbtree $method] == 1 } {
+ append largs " -recnum "
+ } elseif { [is_dbtree $method] == 1 } {
+ append largs " -dup "
+ } elseif { [is_ddbtree $method] == 1 } {
+ append largs " -dup "
+ append largs " -dupsort "
+ } elseif { [is_dhash $method] == 1 } {
+ append largs " -dup "
+ } elseif { [is_ddhash $method] == 1 } {
+ append largs " -dup "
+ append largs " -dupsort "
+ } elseif { [is_queueext $method] == 1 } {
+ append largs " -extent 4 "
+ }
+
+ if { [is_iqueue $method] == 1 || [is_iqueueext $method] == 1 } {
+ append largs " -inorder "
+ }
+
+ # Default padding character is ASCII nul.
+ set fixed_pad 0
+ if {[is_fixed_length $method] == 1} {
+ append largs " -len $fixed_len -pad $fixed_pad "
+ }
+ return $largs
+}
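+
+# A sketch of the usual setup pattern in the tests (variable names
+# illustrative):
+#
+#    set omethod [convert_method $method]
+#    set args [convert_args $method $args]
+#    set db [eval {berkdb_open -create -mode 0644} \
+#        $args $omethod $testfile]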
+
+proc is_btree { method } {
+ set names { -btree BTREE DB_BTREE bt btree }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_dbtree { method } {
+ set names { -dbtree dbtree }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_ddbtree { method } {
+ set names { -ddbtree ddbtree }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_rbtree { method } {
+ set names { -rbtree rbtree RBTREE db_rbtree DB_RBTREE rbt }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_recno { method } {
+ set names { -recno DB_RECNO RECNO db_recno rec recno}
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_rrecno { method } {
+ set names { -rrecno rrecno RRECNO db_rrecno DB_RRECNO rrec }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_frecno { method } {
+ set names { -frecno frecno frec FRECNO db_frecno DB_FRECNO}
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_hash { method } {
+ set names { -hash DB_HASH HASH db_hash h hash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_dhash { method } {
+ set names { -dhash dhash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_ddhash { method } {
+ set names { -ddhash ddhash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_queue { method } {
+ if { [is_queueext $method] == 1 || [is_iqueue $method] == 1 || \
+ [is_iqueueext $method] == 1 } {
+ return 1
+ }
+
+ set names { -queue DB_QUEUE QUEUE db_queue q queue qam }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_queueext { method } {
+ if { [is_iqueueext $method] == 1 } {
+ return 1
+ }
+
+ set names { -queueextent queueextent QUEUEEXTENT qe qamext \
+ queueext -queueext }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_iqueue { method } {
+ if { [is_iqueueext $method] == 1 } {
+ return 1
+ }
+
+ set names { -iqueue DB_IQUEUE IQUEUE db_iqueue iq iqueue iqam }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_iqueueext { method } {
+ set names { -iqueueextent iqueueextent IQUEUEEXTENT iqe iqamext \
+ iqueueext -iqueueext }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_record_based { method } {
+ if { [is_recno $method] || [is_frecno $method] ||
+ [is_rrecno $method] || [is_queue $method] } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_fixed_length { method } {
+ if { [is_queue $method] || [is_frecno $method] } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_compressed { args } {
+ if { [string first "-compress" $args] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_partitioned { args } {
+ if { [string first "-partition" $args] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_partition_callback { args } {
+ if { [string first "-partition_callback" $args] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+# Sort lines in file $in and write results to file $out.
+# This is a more portable alternative to execing the sort command,
+# which has assorted issues on NT [#1576].
+# The addition of a "-n" argument will sort numerically.
+proc filesort { in out { arg "" } } {
+ set i [open $in r]
+
+ set ilines {}
+ while { [gets $i line] >= 0 } {
+ lappend ilines $line
+ }
+
+ if { [string compare $arg "-n"] == 0 } {
+ set olines [lsort -integer $ilines]
+ } else {
+ set olines [lsort $ilines]
+ }
+
+ close $i
+
+ set o [open $out w]
+ foreach line $olines {
+ puts $o $line
+ }
+
+ close $o
+}
+
+# Print lines up to the nth line of infile out to outfile, inclusive.
+# The optional beg argument tells us where to start.
+proc filehead { n infile outfile { beg 0 } } {
+ set in [open $infile r]
+ set out [open $outfile w]
+
+ # Sed uses 1-based line numbers, and so we do too.
+ for { set i 1 } { $i < $beg } { incr i } {
+ if { [gets $in junk] < 0 } {
+ break
+ }
+ }
+
+ for { } { $i <= $n } { incr i } {
+ if { [gets $in line] < 0 } {
+ break
+ }
+ puts $out $line
+ }
+
+ close $in
+ close $out
+}
+
+# Remove file (this replaces $RM).
+# Usage: fileremove filenames =~ rm; fileremove -f filenames =~ rm -rf.
+proc fileremove { args } {
+ set forceflag ""
+ foreach a $args {
+ if { [string first - $a] == 0 } {
+ # It's a flag. Better be f.
+ if { [string first f $a] != 1 } {
+ return -code error "bad flag to fileremove"
+ } else {
+ set forceflag "-force"
+ }
+ } else {
+ eval {file delete $forceflag $a}
+ }
+ }
+}
+
+proc findfail { args } {
+ set errstring {}
+ foreach a $args {
+ if { [file exists $a] == 0 } {
+ continue
+ }
+ set f [open $a r]
+ while { [gets $f line] >= 0 } {
+ if { [string first FAIL $line] == 0 } {
+ lappend errstring $a:$line
+ }
+ }
+ close $f
+ }
+ return $errstring
+}
+
+# Sleep for s seconds.
+proc tclsleep { s } {
+ # On Windows, the system time-of-day clock may update as much
+ # as 55 ms late due to interrupt timing. Don't take any
+ # chances; sleep extra-long so that when tclsleep 1 returns,
+ # it's guaranteed to be a new second.
+ after [expr $s * 1000 + 56]
+}
+
+# Kill a process.
+proc tclkill { id } {
+ source ./include.tcl
+
+ while { [ catch {exec $KILL -0 $id} ] == 0 } {
+ catch {exec $KILL -9 $id}
+ tclsleep 5
+ }
+}
+
+# Compare two files, a la diff. Returns 1 if non-identical, 0 if identical.
+proc filecmp { file_a file_b } {
+ set fda [open $file_a r]
+ set fdb [open $file_b r]
+
+ fconfigure $fda -translation binary
+ fconfigure $fdb -translation binary
+
+ set nra 0
+ set nrb 0
+
+ # The gets can't be in the while condition because we'll
+ # get short-circuit evaluated.
+ while { $nra >= 0 && $nrb >= 0 } {
+ set nra [gets $fda aline]
+ set nrb [gets $fdb bline]
+
+ if { $nra != $nrb || [string compare $aline $bline] != 0} {
+ close $fda
+ close $fdb
+ return 1
+ }
+ }
+
+ close $fda
+ close $fdb
+ return 0
+}
+
+# Compare the log files from 2 envs. Returns 1 if non-identical,
+# 0 if identical.
+proc logcmp { env1 env2 { compare_shared_portion 0 } } {
+ set lc1 [$env1 log_cursor]
+ set lc2 [$env2 log_cursor]
+
+ # If we're comparing the full set of logs in both envs,
+ # set the starting point by looking at the first LSN in the
+ # first env's logs.
+ #
+ # If we are comparing only the shared portion, look at the
+ # starting LSN of the second env as well, and select the
+ # LSN that is larger.
+
+ set start [lindex [$lc1 get -first] 0]
+
+ if { $compare_shared_portion } {
+ set e2_lsn [lindex [$lc2 get -first] 0]
+ if { [$env1 log_compare $start $e2_lsn] < 0 } {
+ set start $e2_lsn
+ }
+ }
+
+ # Read through and compare the logs record by record.
+ for { set l1 [$lc1 get -set $start] ; set l2 [$lc2 get -set $start] }\
+ { [llength $l1] > 0 && [llength $l2] > 0 }\
+ { set l1 [$lc1 get -next] ; set l2 [$lc2 get -next] } {
+ if { [string equal $l1 $l2] != 1 } {
+ $lc1 close
+ $lc2 close
+#puts "l1 is $l1"
+#puts "l2 is $l2"
+ return 1
+ }
+ }
+ $lc1 close
+ $lc2 close
+ return 0
+}
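+
+# Illustrative use, e.g. when two environments are expected to hold
+# identical logs after replication:
+#
+#    error_check_good log_cmp [logcmp $masterenv $clientenv] 0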
+
+# Given two SORTED files, one of which is a complete superset of the other,
+# extract the unique portion of the superset and put it in
+# the given outfile.
+proc fileextract { superset subset outfile } {
+ set sup [open $superset r]
+ set sub [open $subset r]
+ set outf [open $outfile w]
+
+ # The gets can't be in the while condition because we'll
+ # get short-circuit evaluated.
+ set nrp [gets $sup pline]
+ set nrb [gets $sub bline]
+ while { $nrp >= 0 } {
+ if { $nrp != $nrb || [string compare $pline $bline] != 0} {
+ puts $outf $pline
+ } else {
+ set nrb [gets $sub bline]
+ }
+ set nrp [gets $sup pline]
+ }
+
+ close $sup
+ close $sub
+ close $outf
+ return 0
+}
+
+# Verify all .db files in the specified directory.
+proc verify_dir { {directory $testdir} { pref "" } \
+ { noredo 0 } { quiet 0 } { nodump 0 } { cachesize 0 } { unref 1 } } {
+ global encrypt
+ global passwd
+
+ # If we're doing database verification between tests, we don't
+ # want to do verification twice without an intervening cleanup--some
+ # test was skipped. Always verify by default (noredo == 0) so
+ # that explicit calls to verify_dir during tests don't require
+ # cleanup commands.
+ if { $noredo == 1 } {
+ if { [file exists $directory/NOREVERIFY] == 1 } {
+ if { $quiet == 0 } {
+ puts "Skipping verification."
+ }
+ return 0
+ }
+ set f [open $directory/NOREVERIFY w]
+ close $f
+ }
+
+ if { [catch {glob $directory/*.db} dbs] != 0 } {
+ # No files matched
+ return 0
+ }
+ set ret 0
+
+ # Open an env, so that we have a large enough cache. Pick
+ # a fairly generous default if we haven't specified something else.
+
+ if { $cachesize == 0 } {
+ set cachesize [expr 1024 * 1024]
+ }
+ set encarg ""
+ if { $encrypt != 0 } {
+ set encarg "-encryptaes $passwd"
+ }
+
+ set env [eval {berkdb_env -create -private} $encarg \
+ {-cachesize [list 0 $cachesize 0]}]
+ set earg " -env $env "
+
+ # The 'unref' flag means that we report unreferenced pages
+ # at all times. This is the default behavior.
+ # If we have a test which leaves unreferenced pages on systems
+ # where HAVE_FTRUNCATE is not on, then we call verify_dir with
+ # unref == 0.
+ set uflag "-unref"
+ if { $unref == 0 } {
+ set uflag ""
+ }
+
+ foreach db $dbs {
+ # Replication's temp db uses a custom comparison function,
+ # so we can't verify it.
+ #
+ if { [file tail $db] == "__db.rep.db" } {
+ continue
+ }
+ if { [catch \
+ {eval {berkdb dbverify} $uflag $earg $db} res] != 0 } {
+ puts $res
+ puts "FAIL:[timestamp] Verification of $db failed."
+ set ret 1
+ continue
+ } else {
+ error_check_good verify:$db $res 0
+ if { $quiet == 0 } {
+ puts "${pref}Verification of $db succeeded."
+ }
+ }
+
+ # Skip the dump if it's dangerous to do it.
+ if { $nodump == 0 } {
+ if { [catch {eval dumploadtest $db} res] != 0 } {
+ puts $res
+ puts "FAIL:[timestamp] Dump/load of $db failed."
+ set ret 1
+ continue
+ } else {
+ error_check_good dumpload:$db $res 0
+ if { $quiet == 0 } {
+ puts \
+ "${pref}Dump/load of $db succeeded."
+ }
+ }
+ }
+ }
+
+ error_check_good vrfyenv_close [$env close] 0
+
+ return $ret
+}
+
+# Is the database handle in $db a master database containing subdbs?
+proc check_for_subdbs { db } {
+ set stat [$db stat]
+ for { set i 0 } { [string length [lindex $stat $i]] > 0 } { incr i } {
+ set elem [lindex $stat $i]
+ if { [string compare [lindex $elem 0] Flags] == 0 } {
+ # This is the list of flags; look for
+ # "subdatabases".
+ if { [is_substr [lindex $elem 1] subdatabases] } {
+ return 1
+ }
+ }
+ }
+ return 0
+}
+
+proc db_compare { olddb newdb olddbname newdbname } {
+ # Walk through olddb and newdb and make sure their contents
+ # are identical.
+ set oc [$olddb cursor]
+ set nc [$newdb cursor]
+ error_check_good orig_cursor($olddbname) \
+ [is_valid_cursor $oc $olddb] TRUE
+ error_check_good new_cursor($olddbname) \
+ [is_valid_cursor $nc $newdb] TRUE
+
+ for { set odbt [$oc get -first -nolease] } { [llength $odbt] > 0 } \
+ { set odbt [$oc get -next -nolease] } {
+ set ndbt [$nc get -get_both -nolease \
+ [lindex [lindex $odbt 0] 0] [lindex [lindex $odbt 0] 1]]
+ if { [binary_compare $ndbt $odbt] == 1 } {
+ error_check_good oc_close [$oc close] 0
+ error_check_good nc_close [$nc close] 0
+# puts "FAIL: $odbt does not match $ndbt"
+ return 1
+ }
+ }
+
+ for { set ndbt [$nc get -first -nolease] } { [llength $ndbt] > 0 } \
+ { set ndbt [$nc get -next -nolease] } {
+ set odbt [$oc get -get_both -nolease \
+ [lindex [lindex $ndbt 0] 0] [lindex [lindex $ndbt 0] 1]]
+ if { [binary_compare $ndbt $odbt] == 1 } {
+ error_check_good oc_close [$oc close] 0
+ error_check_good nc_close [$nc close] 0
+# puts "FAIL: $odbt does not match $ndbt"
+ return 1
+ }
+ }
+
+ error_check_good orig_cursor_close($olddbname) [$oc close] 0
+ error_check_good new_cursor_close($newdbname) [$nc close] 0
+
+ return 0
+}
+
+proc dumploadtest { db } {
+ global util_path
+ global encrypt
+ global passwd
+
+ set newdbname $db-dumpload.db
+
+ set dbarg ""
+ set utilflag ""
+ if { $encrypt != 0 } {
+ set dbarg "-encryptany $passwd"
+ set utilflag "-P $passwd"
+ }
+
+ # Dump/load the whole file, including all subdbs.
+
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -k \
+ $db | $util_path/db_load $utilflag $newdbname} res]
+ error_check_good db_dump/db_load($db:$res) $rval 0
+
+ # If the old file was empty, there's no new file and we're done.
+ if { [file exists $newdbname] == 0 } {
+ return 0
+ }
+
+ # Open original database.
+ set olddb [eval {berkdb_open -rdonly} $dbarg $db]
+ error_check_good olddb($db) [is_valid_db $olddb] TRUE
+
+ if { [check_for_subdbs $olddb] } {
+ # If $db has subdatabases, compare each one separately.
+ set oc [$olddb cursor]
+ error_check_good orig_cursor($db) \
+ [is_valid_cursor $oc $olddb] TRUE
+
+ for { set dbt [$oc get -first] } \
+ { [llength $dbt] > 0 } \
+ { set dbt [$oc get -next] } {
+ set subdb [lindex [lindex $dbt 0] 0]
+
+ set oldsubdb \
+ [eval {berkdb_open -rdonly} $dbarg {$db $subdb}]
+ error_check_good olddb($db) [is_valid_db $oldsubdb] TRUE
+
+ # Open the new database.
+ set newdb \
+ [eval {berkdb_open -rdonly} $dbarg {$newdbname $subdb}]
+ error_check_good newdb($db) [is_valid_db $newdb] TRUE
+
+ db_compare $oldsubdb $newdb $db $newdbname
+ error_check_good new_db_close($db) [$newdb close] 0
+ error_check_good old_subdb_close($oldsubdb) [$oldsubdb close] 0
+ }
+
+ error_check_good oldcclose [$oc close] 0
+ } else {
+ # Open the new database.
+ set newdb [eval {berkdb_open -rdonly} $dbarg $newdbname]
+ error_check_good newdb($db) [is_valid_db $newdb] TRUE
+
+ db_compare $olddb $newdb $db $newdbname
+ error_check_good new_db_close($db) [$newdb close] 0
+ }
+
+ error_check_good orig_db_close($db) [$olddb close] 0
+ eval berkdb dbremove $dbarg $newdbname
+}
+
+# Test regular and aggressive salvage procedures for all databases
+# in a directory.
+proc salvage_dir { dir { noredo 0 } { quiet 0 } } {
+ global util_path
+ global encrypt
+ global passwd
+
+ # If we're doing salvage testing between tests, don't do it
+ # twice without an intervening cleanup.
+ if { $noredo == 1 } {
+ if { [file exists $dir/NOREDO] == 1 } {
+ if { $quiet == 0 } {
+ puts "Skipping salvage testing."
+ }
+ return 0
+ }
+ set f [open $dir/NOREDO w]
+ close $f
+ }
+
+ if { [catch {glob $dir/*.db} dbs] != 0 } {
+ # No files matched
+ return 0
+ }
+
+ foreach db $dbs {
+ set dumpfile $db-dump
+ set sorteddump $db-dump-sorted
+ set salvagefile $db-salvage
+ set sortedsalvage $db-salvage-sorted
+ set aggsalvagefile $db-aggsalvage
+
+ set dbarg ""
+ set utilflag ""
+ if { $encrypt != 0 } {
+ set dbarg "-encryptany $passwd"
+ set utilflag "-P $passwd"
+ }
+
+ # Dump the database with salvage, with aggressive salvage,
+ # and without salvage.
+ #
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -r \
+ -f $salvagefile $db} res]
+ error_check_good salvage($db:$res) $rval 0
+ filesort $salvagefile $sortedsalvage
+
+ # We can't avoid occasional verify failures in aggressive
+ # salvage. Make sure it's the expected failure.
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -R \
+ -f $aggsalvagefile $db} res]
+ if { $rval == 1 } {
+#puts "res is $res"
+ error_check_good agg_failure \
+ [is_substr $res "DB_VERIFY_BAD"] 1
+ } else {
+ error_check_good aggressive_salvage($db:$res) $rval 0
+ }
+
+ # Queue databases must be dumped with -k to display record
+ # numbers if we're not in salvage mode.
+ if { [isqueuedump $salvagefile] == 1 } {
+ append utilflag " -k "
+ }
+
+ # Discard db_pagesize lines from file dumped with ordinary
+ # db_dump -- they are omitted from a salvage dump.
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag \
+ -f $dumpfile $db} res]
+ error_check_good dump($db:$res) $rval 0
+ filesort $dumpfile $sorteddump
+ discardline $sorteddump TEMPFILE "db_pagesize="
+ file copy -force TEMPFILE $sorteddump
+
+ # A non-aggressively salvaged file should match db_dump.
+ error_check_good compare_dump_and_salvage \
+ [filecmp $sorteddump $sortedsalvage] 0
+
+ puts "Salvage tests of $db succeeded."
+ }
+}
+
+# Reads infile, writes to outfile, discarding any line whose
+# beginning matches the given string.
+proc discardline { infile outfile discard } {
+ set fdin [open $infile r]
+ set fdout [open $outfile w]
+
+ while { [gets $fdin str] >= 0 } {
+ if { [string match $discard* $str] != 1 } {
+ puts $fdout $str
+ }
+ }
+ close $fdin
+ close $fdout
+}
+
+# Inspects dumped file for "type=" line. Returns 1 if type=queue.
+proc isqueuedump { file } {
+ set fd [open $file r]
+
+ while { [gets $fd str] >= 0 } {
+ if { [string match type=* $str] == 1 } {
+ if { [string match "type=queue" $str] == 1 } {
+ close $fd
+ return 1
+ } else {
+ close $fd
+ return 0
+ }
+ }
+ }
+	close $fd
+
+	# No "type=" line was found; treat the dump as non-queue.
+	return 0
+}
+
+# Generate randomly ordered, guaranteed-unique four-character strings that can
+# be used to differentiate duplicates without creating duplicate duplicates
+# (used by test031 & test032).  randstring_init must be called before the
+# first call to randstring; it initializes things for up to $i distinct
+# strings, and each call to randstring returns the next string.
+proc randstring_init { i } {
+ global rs_int_list alphabet
+
+ # Fail if we can't generate sufficient unique strings.
+ if { $i > [expr 26 * 26 * 26 * 26] } {
+ set errstring\
+ "Duplicate set too large for random string generator"
+ puts "FAIL:[timestamp] $errstring"
+ return -code return $errstring
+ }
+
+ set rs_int_list {}
+
+ # generate alphabet array
+ for { set j 0 } { $j < 26 } { incr j } {
+ set a($j) [string index $alphabet $j]
+ }
+
+ # Generate a list with $i elements, { aaaa, aaab, ... aaaz, aaba ...}
+ for { set d1 0 ; set j 0 } { $d1 < 26 && $j < $i } { incr d1 } {
+ for { set d2 0 } { $d2 < 26 && $j < $i } { incr d2 } {
+ for { set d3 0 } { $d3 < 26 && $j < $i } { incr d3 } {
+ for { set d4 0 } { $d4 < 26 && $j < $i } \
+ { incr d4 } {
+ lappend rs_int_list \
+ $a($d1)$a($d2)$a($d3)$a($d4)
+ incr j
+ }
+ }
+ }
+ }
+
+ # Randomize the list.
+ set rs_int_list [randomize_list $rs_int_list]
+}
+
+# Randomize a list. Returns a randomly-reordered copy of l.
+proc randomize_list { l } {
+ set i [llength $l]
+
+ for { set j 0 } { $j < $i } { incr j } {
+ # Pick a random element from $j to the end
+ set k [berkdb random_int $j [expr $i - 1]]
+
+ # Swap it with element $j
+ set t1 [lindex $l $j]
+ set t2 [lindex $l $k]
+
+ set l [lreplace $l $j $j $t2]
+ set l [lreplace $l $k $k $t1]
+ }
+
+ return $l
+}
+
+proc randstring {} {
+ global rs_int_list
+
+ if { [info exists rs_int_list] == 0 || [llength $rs_int_list] == 0 } {
+ set errstring "randstring uninitialized or used too often"
+ puts "FAIL:[timestamp] $errstring"
+ return -code return $errstring
+ }
+
+ set item [lindex $rs_int_list 0]
+ set rs_int_list [lreplace $rs_int_list 0 0]
+
+ return $item
+}
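+
+# Illustrative usage of the two procs above (not part of the original
+# suite): initialize for 10 distinct strings, then draw them one at a
+# time to build unique duplicate data items.
+#
+#	randstring_init 10
+#	for { set n 0 } { $n < 10 } { incr n } {
+#		set dup_suffix [randstring]
+#	}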
+
+# Takes a variable-length arg list and returns a two-element list: the
+# arguments that are not hyphenated flags, followed by a list of each
+# alphanumeric flag character it finds.
+proc extractflags { args } {
+ set inflags 1
+ set flags {}
+ while { $inflags == 1 } {
+ set curarg [lindex $args 0]
+ if { [string first "-" $curarg] == 0 } {
+ set i 1
+ while {[string length [set f \
+ [string index $curarg $i]]] > 0 } {
+ incr i
+ if { [string compare $f "-"] == 0 } {
+ set inflags 0
+ break
+ } else {
+ lappend flags $f
+ }
+ }
+ set args [lrange $args 1 end]
+ } else {
+ set inflags 0
+ }
+ }
+ return [list $args $flags]
+}
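+
+# Illustrative example (not part of the original suite): a call such as
+#
+#	extractflags -ab -c foo bar
+#
+# would return {{foo bar} {a b c}} -- the non-flag arguments first,
+# followed by the individual flag characters.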
+
+# Wrapper for berkdb open, used throughout the test suite so that we can
+# set an errfile/errpfx as appropriate.
+proc berkdb_open { args } {
+ global is_envmethod
+
+ if { [info exists is_envmethod] == 0 } {
+ set is_envmethod 0
+ }
+
+ set errargs {}
+ if { $is_envmethod == 0 } {
+ append errargs " -errfile /dev/stderr "
+ append errargs " -errpfx \\F\\A\\I\\L"
+ }
+
+ eval {berkdb open} $errargs $args
+}
+
+# Version without errpfx/errfile, used when we're expecting a failure.
+proc berkdb_open_noerr { args } {
+ eval {berkdb open} $args
+}
+
+# Wrapper for berkdb env, used throughout the test suite so that we can
+# set an errfile/errpfx as appropriate.
+proc berkdb_env { args } {
+ global is_envmethod
+
+ if { [info exists is_envmethod] == 0 } {
+ set is_envmethod 0
+ }
+
+ set errargs {}
+ if { $is_envmethod == 0 } {
+ append errargs " -errfile /dev/stderr "
+ append errargs " -errpfx \\F\\A\\I\\L"
+ }
+
+ eval {berkdb env} $errargs $args
+}
+
+# Version without errpfx/errfile, used when we're expecting a failure.
+proc berkdb_env_noerr { args } {
+ eval {berkdb env} $args
+}
+
+proc check_handles { {outf stdout} } {
+ global ohandles
+
+ set handles [berkdb handles]
+ if {[llength $handles] != [llength $ohandles]} {
+ puts $outf "WARNING: Open handles during cleanup: $handles"
+ }
+ set ohandles $handles
+}
+
+proc open_handles { } {
+ return [llength [berkdb handles]]
+}
+
+# Closes any open database and cursor handles, cursors first.
+# Ignores other handles, such as env handles.
+proc close_db_handles { } {
+ set handles [berkdb handles]
+ set db_handles {}
+ set cursor_handles {}
+
+ # Find the handles we want to process. We can't use
+ # is_valid_cursor to find cursors because we don't know
+ # the cursor's parent database handle.
+ foreach handle $handles {
+ if {[string range $handle 0 1] == "db"} {
+ if { [string first "c" $handle] != -1} {
+ lappend cursor_handles $handle
+ } else {
+ lappend db_handles $handle
+ }
+ }
+ }
+
+ foreach handle $cursor_handles {
+ error_check_good cursor_close [$handle close] 0
+ }
+ foreach handle $db_handles {
+ error_check_good db_close [$handle close] 0
+ }
+}
+
+proc move_file_extent { dir dbfile tag op } {
+ set curfiles [get_extfiles $dir $dbfile ""]
+ set tagfiles [get_extfiles $dir $dbfile $tag]
+ #
+ # We want to copy or rename only those that have been saved,
+ # so delete all the current extent files so that we don't
+ # end up with extra ones we didn't restore from our saved ones.
+ foreach extfile $curfiles {
+ file delete -force $extfile
+ }
+ foreach extfile $tagfiles {
+ set dbq [make_ext_filename $dir $dbfile $extfile]
+ #
+ # We can either copy or rename
+ #
+ file $op -force $extfile $dbq
+ }
+}
+
+proc copy_extent_file { dir dbfile tag { op copy } } {
+ set files [get_extfiles $dir $dbfile ""]
+ foreach extfile $files {
+ set dbq [make_ext_filename $dir $dbfile $extfile $tag]
+ file $op -force $extfile $dbq
+ }
+}
+
+proc get_extfiles { dir dbfile tag } {
+ if { $tag == "" } {
+ set filepat $dir/__db?.$dbfile.\[0-9\]*
+ } else {
+ set filepat $dir/__db?.$dbfile.$tag.\[0-9\]*
+ }
+ return [glob -nocomplain -- $filepat]
+}
+
+proc make_ext_filename { dir dbfile extfile {tag ""}} {
+ set i [string last "." $extfile]
+ incr i
+ set extnum [string range $extfile $i end]
+ set j [string last "/" $extfile]
+ incr j
+ set i [string first "." [string range $extfile $j end]]
+ incr i $j
+ incr i -1
+ set prefix [string range $extfile $j $i]
+ if {$tag == "" } {
+ return $dir/$prefix.$dbfile.$extnum
+ } else {
+ return $dir/$prefix.$dbfile.$tag.$extnum
+ }
+}
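+
+# Illustrative example (the file names here are hypothetical): with
+# extent file TESTDIR/__dbq.test.db.0 and tag "afterop",
+#
+#	make_ext_filename TESTDIR test.db TESTDIR/__dbq.test.db.0 afterop
+#
+# returns TESTDIR/__dbq.test.db.afterop.0.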
+
+# All pids for Windows 9X are negative values. When we want to have
+# unsigned int values, unique to the process, we'll take the absolute
+# value of the pid. This avoids unsigned/signed mistakes, yet
+# guarantees uniqueness, since each system has pids that are all
+# either positive or negative.
+#
+proc sanitized_pid { } {
+ set mypid [pid]
+ if { $mypid < 0 } {
+ set mypid [expr - $mypid]
+ }
+ puts "PID: [pid] $mypid\n"
+ return $mypid
+}
+
+#
+# Extract the page size field from a stat record. Return -1 if
+# none is found.
+#
+proc get_pagesize { stat } {
+ foreach field $stat {
+ set title [lindex $field 0]
+ if {[string compare $title "Page size"] == 0} {
+ return [lindex $field 1]
+ }
+ }
+ return -1
+}
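+
+# Illustrative usage (not part of the original suite): pass in the list
+# returned by a database's stat call.
+#
+#	set psize [get_pagesize [$db stat]]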
+
+# Get a globbed list of source files and executables to use as large
+# data items in overflow page tests.
+proc get_file_list { {small 0} } {
+ global is_windows_test
+ global is_qnx_test
+ global is_je_test
+ global src_root
+
+ # Skip libraries if we have a debug build.
+ if { $is_qnx_test || $is_je_test || [is_debug] == 1 } {
+ set small 1
+ }
+
+ if { $small && $is_windows_test } {
+ set templist [glob $src_root/*/*.c */env*.obj]
+ } elseif { $small } {
+ set templist [glob $src_root/*/*.c ./env*.o]
+ } elseif { $is_windows_test } {
+ set templist \
+ [glob $src_root/*/*.c */*.obj */libdb??.dll */libdb??d.dll]
+ } else {
+ set templist [glob $src_root/*/*.c ./*.o ./.libs/libdb-?.?.s?]
+ }
+
+ # We don't want a huge number of files, but we do want a nice
+ # variety. If there are more than nfiles files, pick out a list
+ # by taking every other, or every third, or every nth file.
+ set filelist {}
+ set nfiles 500
+ if { [llength $templist] > $nfiles } {
+ set skip \
+ [expr [llength $templist] / [expr [expr $nfiles / 3] * 2]]
+ set i $skip
+ while { $i < [llength $templist] } {
+ lappend filelist [lindex $templist $i]
+ incr i $skip
+ }
+ } else {
+ set filelist $templist
+ }
+ return $filelist
+}
+
+proc is_cdbenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -cdb] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_lockenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -lock] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_logenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -log] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_mpoolenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -mpool] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_repenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -rep] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_rpcenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -rpc] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_secenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -crypto] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_txnenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -txn] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc get_home { env } {
+ set sys [$env attributes]
+ set h [lsearch $sys -home]
+ if { $h == -1 } {
+ return NULL
+ }
+ incr h
+ return [lindex $sys $h]
+}
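+
+# Illustrative usage (not part of the original suite):
+#
+#	set env_home [get_home $env]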
+
+proc reduce_dups { nent ndp } {
+ upvar $nent nentries
+ upvar $ndp ndups
+
+	# If we are using a txnenv, assume it is using the default
+	# maximum number of locks; cut back so that we don't run out
+	# of locks.  Reduce by 25% until we fit.
+ #
+ while { [expr $nentries * $ndups] > 5000 } {
+ set nentries [expr ($nentries / 4) * 3]
+ set ndups [expr ($ndups / 4) * 3]
+ }
+}
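+
+# Illustrative usage (not part of the original suite): callers pass the
+# names of their variables, not the values, since reduce_dups modifies
+# both via upvar.
+#
+#	set nentries 10000
+#	set ndups 10
+#	reduce_dups nentries ndups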
+
+proc getstats { statlist field } {
+ foreach pair $statlist {
+ set txt [lindex $pair 0]
+ if { [string equal $txt $field] == 1 } {
+ return [lindex $pair 1]
+ }
+ }
+ return -1
+}
+
+# Return the value for a particular field in a set of statistics.
+# Works for regular db stat as well as env stats (log_stat,
+# lock_stat, txn_stat, rep_stat, etc.).
+proc stat_field { handle which_stat field } {
+ set stat [$handle $which_stat]
+ return [getstats $stat $field ]
+}
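+
+# Illustrative usage (not part of the original suite): pull a single
+# counter out of the env-wide log statistics.
+#
+#	set nwrites [stat_field $env log_stat {Times log written}]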
+
+proc big_endian { } {
+ global tcl_platform
+ set e $tcl_platform(byteOrder)
+ if { [string compare $e littleEndian] == 0 } {
+ return 0
+ } elseif { [string compare $e bigEndian] == 0 } {
+ return 1
+ } else {
+ error "FAIL: Unknown endianness $e"
+ }
+}
+
+# Check if this is a debug build. Use 'string equal' so we
+# don't get fooled by debug_rop and debug_wop.
+proc is_debug { } {
+
+ set conf [berkdb getconfig]
+ foreach item $conf {
+ if { [string equal $item "debug"] } {
+ return 1
+ }
+ }
+ return 0
+}
+
+proc adjust_logargs { logtype {lbufsize 0} } {
+ if { $logtype == "in-memory" } {
+ if { $lbufsize == 0 } {
+ set lbuf [expr 1 * [expr 1024 * 1024]]
+ set logargs " -log_inmemory -log_buffer $lbuf "
+ } else {
+ set logargs " -log_inmemory -log_buffer $lbufsize "
+ }
+ } elseif { $logtype == "on-disk" } {
+ set logargs ""
+ } else {
+ error "FAIL: unrecognized log type $logtype"
+ }
+ return $logargs
+}
+
+proc adjust_txnargs { logtype } {
+ if { $logtype == "in-memory" } {
+ set txnargs " -txn "
+ } elseif { $logtype == "on-disk" } {
+ set txnargs " -txn nosync "
+ } else {
+ error "FAIL: unrecognized log type $logtype"
+ }
+ return $txnargs
+}
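+
+# Illustrative usage of the two helpers above (not part of the original
+# suite): build up an env command for an in-memory logging run.
+#
+#	set logtype "in-memory"
+#	set logargs [adjust_logargs $logtype]
+#	set txnargs [adjust_txnargs $logtype]
+#	set env_cmd "berkdb_env -create -home $testdir $logargs $txnargs"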
+
+proc get_logfile { env where } {
+ # Open a log cursor.
+ set m_logc [$env log_cursor]
+ error_check_good m_logc [is_valid_logc $m_logc $env] TRUE
+
+	# Get the log file number of the first or last record, as requested.
+ if { $where == "first" } {
+ set rec [$m_logc get -first]
+ } else {
+ set rec [$m_logc get -last]
+ }
+ error_check_good cursor_close [$m_logc close] 0
+ set lsn [lindex $rec 0]
+ set log [lindex $lsn 0]
+ return $log
+}
+
+# Determine whether logs are in-mem or on-disk.
+# This requires the existence of logs to work correctly.
+proc check_log_location { env } {
+ if { [catch {get_logfile $env first} res] } {
+ puts "FAIL: env $env not configured for logging"
+ }
+ set inmemory [$env log_get_config inmemory]
+
+ set env_home [get_home $env]
+ set logfiles [glob -nocomplain $env_home/log.*]
+ if { $inmemory == 1 } {
+ error_check_good no_logs_on_disk [llength $logfiles] 0
+ } else {
+ error_check_bad logs_on_disk [llength $logfiles] 0
+ }
+}
+
+# Given the env and file name, verify that a given database is on-disk
+# or in-memory as expected. If "db_on_disk" is 1, "databases_in_memory"
+# is 0 and vice versa, so we use error_check_bad.
+proc check_db_location { env { dbname "test.db" } { datadir "" } } {
+ global databases_in_memory
+
+ if { $datadir != "" } {
+ set env_home $datadir
+ } else {
+ set env_home [get_home $env]
+ }
+ set db_on_disk [file exists $env_home/$dbname]
+
+ error_check_bad db_location $db_on_disk $databases_in_memory
+}
+
+# If we have a private env, check that no region files are found on-disk.
+proc no_region_files_on_disk { dir } {
+	global env_private
+ if { $env_private } {
+ set regionfiles [glob -nocomplain $dir/__db.???]
+ error_check_good regionfiles [llength $regionfiles] 0
+ }
+}
+
+proc find_valid_methods { test } {
+ global checking_valid_methods
+ global valid_methods
+
+ # To find valid methods, call the test with checking_valid_methods
+ # on. It doesn't matter what method we use for this call, so we
+ # arbitrarily pick btree.
+ #
+ set checking_valid_methods 1
+ set test_methods [$test btree]
+ set checking_valid_methods 0
+ if { $test_methods == "ALL" } {
+ return $valid_methods
+ } else {
+ return $test_methods
+ }
+}
+
+proc part {data} {
+ if { [string length $data] < 2 } {
+ return 0
+ }
+ binary scan $data s res
+ return $res
+}
+
+proc my_isalive { pid } {
+ source ./include.tcl
+
+ if {[catch {exec $KILL -0 $pid}]} {
+ return 0
+ }
+ return 1
+}
diff --git a/db-4.8.30/test/txn001.tcl b/db-4.8.30/test/txn001.tcl
new file mode 100644
index 0000000..44a5f70
--- /dev/null
+++ b/db-4.8.30/test/txn001.tcl
@@ -0,0 +1,114 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST txn001
+# TEST Begin, commit, abort testing.
+proc txn001 { {tnum "001"} { max 1024 } { ntxns 50 } } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn$tnum: Basic begin, commit, abort"
+
+ if { $tnum != "001"} {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+
+ # Open environment
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create -mode 0644 -txn \
+ -txn_max $max -home $testdir}]
+	error_check_good env_open [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [ $env txn_id_set $txn_curid $txn_maxid ] 0
+ txn001_suba $ntxns $env $tnum
+ txn001_subb $ntxns $env $tnum
+ txn001_subc $ntxns $env $tnum
+ # Close and unlink the file
+ error_check_good env_close:$env [$env close] 0
+}
+
+proc txn001_suba { ntxns env tnum } {
+ source ./include.tcl
+
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn$tnum.a: Beginning/Committing $ntxns Transactions in $env"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+}
+
+proc txn001_subb { ntxns env tnum } {
+ # We will create a bunch of transactions and abort them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn$tnum.b: Beginning/Aborting Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now abort them all
+ foreach t $txn_list {
+ error_check_good txn_abort:$t [$t abort] 0
+ }
+}
+
+proc txn001_subc { ntxns env tnum } {
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn$tnum.c: Beginning/Prepare/Committing Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now prepare them all
+ foreach t $txn_list {
+ error_check_good txn_prepare:$t \
+ [$t prepare [make_gid global:$t]] 0
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+}
+
diff --git a/db-4.8.30/test/txn002.tcl b/db-4.8.30/test/txn002.tcl
new file mode 100644
index 0000000..378c8bf
--- /dev/null
+++ b/db-4.8.30/test/txn002.tcl
@@ -0,0 +1,89 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST txn002
+# TEST Verify that read-only transactions do not write log records.
+proc txn002 { {tnum "002" } { max 1024 } { ntxns 50 } } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn$tnum: Read-only transaction test ($max) ($ntxns)"
+
+ if { $tnum != "002" } {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+
+ env_cleanup $testdir
+ set env [berkdb \
+ env -create -mode 0644 -txn -txn_max $max -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [$env txn_id_set $txn_curid $txn_maxid ] 0
+
+ # Save the current bytes in the log.
+ set off_start [txn002_logoff $env]
+
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn$tnum.a: Beginning/Committing Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+ # Make sure we haven't written any new log records except
+ # potentially some recycle records if we were wrapping txnids.
+ set off_stop [txn002_logoff $env]
+ if { $off_stop != $off_start } {
+ txn002_recycle_only $testdir
+ }
+
+ error_check_good env_close [$env close] 0
+}
+
+proc txn002_logoff { env } {
+ set stat [$env log_stat]
+ foreach i $stat {
+ foreach {txt val} $i {break}
+ if { [string compare \
+ $txt {Current log file offset}] == 0 } {
+ return $val
+ }
+ }
+}
+
+# Make sure that the only log records found are txn_recycle records
+proc txn002_recycle_only { dir } {
+ global util_path
+
+ set tmpfile $dir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $dir > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+
+ set f [open $tmpfile r]
+ while { [gets $f record] >= 0 } {
+ set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
+ if { $r == 1 } {
+ error_check_good record_type __txn_recycle $name
+ }
+ }
+ close $f
+ fileremove $tmpfile
+}
diff --git a/db-4.8.30/test/txn003.tcl b/db-4.8.30/test/txn003.tcl
new file mode 100644
index 0000000..90b3e9e
--- /dev/null
+++ b/db-4.8.30/test/txn003.tcl
@@ -0,0 +1,230 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST txn003
+# TEST Test abort/commit/prepare of txns with outstanding child txns.
+proc txn003 { {tnum "003"} } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn$tnum: Outstanding child transaction test"
+
+ if { $tnum != "003" } {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+ env_cleanup $testdir
+ set testfile txn003.db
+
+ set env_cmd "berkdb_env_noerr -create -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [$env txn_id_set $txn_curid $txn_maxid] 0
+
+ set oflags {-auto_commit -create -btree -mode 0644 -env $env $testfile}
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ #
+ # Put some data so that we can check commit or abort of child
+ #
+ set key 1
+ set origdata some_data
+ set newdata this_is_new_data
+ set newdata2 some_other_new_data
+
+ error_check_good db_put [$db put $key $origdata] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ txn003_check $db $key "Origdata" $origdata
+
+ puts "\tTxn$tnum.a: Parent abort"
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ error_check_good parent_abort [$parent abort] 0
+ txn003_check $db $key "parent_abort" $origdata
+ # Check child handle is invalid
+ set stat [catch {$child abort} ret]
+ error_check_good child_handle $stat 1
+ error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+
+ puts "\tTxn$tnum.b: Parent commit"
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ error_check_good parent_commit [$parent commit] 0
+ txn003_check $db $key "parent_commit" $newdata
+ # Check child handle is invalid
+ set stat [catch {$child abort} ret]
+ error_check_good child_handle $stat 1
+ error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+ error_check_good dbclose [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Since the data check assumes what has come before, the 'commit'
+ # operation must be last.
+ #
+ set hdr "\tTxn$tnum"
+ set rlist {
+ {begin ".c"}
+ {prepare ".d"}
+ {abort ".e"}
+ {commit ".f"}
+ }
+ set count 0
+ foreach pair $rlist {
+ incr count
+ set op [lindex $pair 0]
+ set msg [lindex $pair 1]
+ set msg $hdr$msg
+ txn003_body $env_cmd $testfile $testdir $key $newdata2 $msg $op
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ berkdb debug_check
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ #
+		# For prepare we'll just end up aborting after we test
+		# what we need to, so set gooddata to the same value
+		# as for abort.
+ switch $op {
+ abort {
+ set gooddata $newdata
+ }
+ begin {
+ set gooddata $newdata
+ }
+ commit {
+ set gooddata $newdata2
+ }
+ prepare {
+ set gooddata $newdata
+ }
+ }
+ txn003_check $db $key "parent_$op" $gooddata
+ error_check_good dbclose [$db close] 0
+ error_check_good env_close [$env close] 0
+ }
+
+ puts "\tTxn$tnum.g: Attempt child prepare"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ berkdb debug_check
+ set db [eval {berkdb_open_noerr} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ set gid [make_gid child_prepare:$child]
+ set stat [catch {$child prepare $gid} ret]
+ error_check_good child_prepare $stat 1
+ error_check_good child_prep_err [is_substr $ret "txn prepare"] 1
+
+ puts "\tTxn$tnum.h: Attempt child discard"
+ set stat [catch {$child discard} ret]
+ error_check_good child_discard $stat 1
+
+ # We just panic'd the region, so the next operations will fail.
+ # No matter, we still have to clean up all the handles.
+
+ set stat [catch {$parent commit} ret]
+ error_check_good parent_commit $stat 1
+ error_check_good parent_commit:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ set stat [catch {$db close} ret]
+ error_check_good db_close $stat 1
+ error_check_good db_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ set stat [catch {$env close} ret]
+ error_check_good env_close $stat 1
+ error_check_good env_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+}
+
+proc txn003_body { env_cmd testfile dir key newdata2 msg op } {
+ source ./include.tcl
+
+ berkdb debug_check
+ sentinel_init
+ set gidf $dir/gidfile
+ fileremove -f $gidf
+ set pidlist {}
+ puts "$msg.0: Executing child script to prepare txns"
+ berkdb debug_check
+ set p [exec $tclsh_path $test_path/wrap.tcl txnscript.tcl \
+ $testdir/txnout $env_cmd $testfile $gidf $key $newdata2 &]
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/txnout r]
+ set r [read $f1]
+ puts $r
+ close $f1
+ fileremove -f $testdir/txnout
+
+ berkdb debug_check
+ puts -nonewline "$msg.1: Running recovery ... "
+ flush stdout
+ berkdb debug_check
+ set env [eval $env_cmd "-recover"]
+ error_check_good dbenv-recover [is_valid_env $env] TRUE
+ puts "complete"
+
+ puts "$msg.2: getting txns from txn_recover"
+ set txnlist [$env txn_recover]
+ error_check_good txnlist_len [llength $txnlist] 1
+ set tpair [lindex $txnlist 0]
+
+ set gfd [open $gidf r]
+ set ret [gets $gfd parentgid]
+ close $gfd
+ set txn [lindex $tpair 0]
+ set gid [lindex $tpair 1]
+ if { $op == "begin" } {
+ puts "$msg.2: $op new txn"
+ } else {
+ puts "$msg.2: $op parent"
+ }
+ error_check_good gidcompare $gid $parentgid
+ if { $op == "prepare" } {
+ set gid [make_gid prepare_recover:$txn]
+ set stat [catch {$txn $op $gid} ret]
+ error_check_good prep_error $stat 1
+ error_check_good prep_err \
+ [is_substr $ret "transaction already prepared"] 1
+ error_check_good txn:prep_abort [$txn abort] 0
+ } elseif { $op == "begin" } {
+ # As of the 4.6 release, we allow new txns to be created
+ # while prepared but not committed txns exist, so this
+ # should succeed.
+ set txn2 [$env txn]
+ error_check_good txn:begin_abort [$txn abort] 0
+ error_check_good txn2:begin_abort [$txn2 abort] 0
+ } else {
+ error_check_good txn:$op [$txn $op] 0
+ }
+ error_check_good envclose [$env close] 0
+}
+
+proc txn003_check { db key msg gooddata } {
+ set kd [$db get $key]
+ set data [lindex [lindex $kd 0] 1]
+ error_check_good $msg $data $gooddata
+}
diff --git a/db-4.8.30/test/txn004.tcl b/db-4.8.30/test/txn004.tcl
new file mode 100644
index 0000000..9a99bcd
--- /dev/null
+++ b/db-4.8.30/test/txn004.tcl
@@ -0,0 +1,60 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST txn004
+# TEST Test of wraparound txnids (txn001)
+proc txn004 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn004.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn001 "004.1"
+ puts "\tTxn004.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn001 "004.2"
+
+ puts "\tTxn004.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
+proc txn_idwrap_check { testdir } {
+ global txn_curid
+ global txn_maxid
+
+ env_cleanup $testdir
+
+ # Open/create the txn region
+ set e [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_substr $e env] 1
+
+ set txn1 [$e txn]
+ error_check_good txn1 [is_valid_txn $txn1 $e] TRUE
+ error_check_good txn_id_set \
+ [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
+
+ set txn2 [$e txn]
+ error_check_good txn2 [is_valid_txn $txn2 $e] TRUE
+
+ # txn3 will require a wraparound txnid
+ # XXX How can we test it has a wrapped id?
+ set txn3 [$e txn]
+ error_check_good wrap_txn3 [is_valid_txn $txn3 $e] TRUE
+
+ error_check_good free_txn1 [$txn1 commit] 0
+ error_check_good free_txn2 [$txn2 commit] 0
+ error_check_good free_txn3 [$txn3 commit] 0
+
+ error_check_good close [$e close] 0
+}
+
diff --git a/db-4.8.30/test/txn005.tcl b/db-4.8.30/test/txn005.tcl
new file mode 100644
index 0000000..66cbe11
--- /dev/null
+++ b/db-4.8.30/test/txn005.tcl
@@ -0,0 +1,73 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST txn005
+# TEST Test transaction ID wraparound and recovery.
+proc txn005 {} {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ env_cleanup $testdir
+ puts "Txn005: Test transaction wraparound recovery"
+
+ # Open/create the txn region
+ puts "\tTxn005.a: Create environment"
+ set e [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ set txn1 [$e txn]
+ error_check_good txn1 [is_valid_txn $txn1 $e] TRUE
+
+ set db [berkdb_open -env $e -txn $txn1 -create -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good txn1_commit [$txn1 commit] 0
+
+ puts "\tTxn005.b: Set txn ids"
+ error_check_good txn_id_set \
+ [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
+
+ # txn2 and txn3 will require a wraparound txnid
+ set txn2 [$e txn]
+ error_check_good txn2 [is_valid_txn $txn2 $e] TRUE
+
+ error_check_good put [$db put -txn $txn2 "a" ""] 0
+ error_check_good txn2_commit [$txn2 commit] 0
+
+ error_check_good get_a [$db get "a"] "{a {}}"
+
+ error_check_good close [$db close] 0
+
+ set txn3 [$e txn]
+ error_check_good txn3 [is_valid_txn $txn3 $e] TRUE
+
+ set db [berkdb_open -env $e -txn $txn3 -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+
+ error_check_good put2 [$db put -txn $txn3 "b" ""] 0
+ error_check_good sync [$db sync] 0
+ error_check_good txn3_abort [$txn3 abort] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good eclose [$e close] 0
+
+ puts "\tTxn005.c: Run recovery"
+ set stat [catch {exec $util_path/db_recover -h $testdir -e -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ puts "\tTxn005.d: Check data"
+ set e [berkdb_env -txn -home $testdir]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ set db [berkdb_open -env $e -auto_commit -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+
+ error_check_good get_a [$db get "a"] "{a {}}"
+ error_check_bad get_b [$db get "b"] "{b {}}"
+ error_check_good dbclose [$db close] 0
+ error_check_good eclose [$e close] 0
+}
diff --git a/db-4.8.30/test/txn006.tcl b/db-4.8.30/test/txn006.tcl
new file mode 100644
index 0000000..8a70077
--- /dev/null
+++ b/db-4.8.30/test/txn006.tcl
@@ -0,0 +1,45 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+#TEST txn006
+#TEST Test dump/load in transactional environment.
+proc txn006 { { iter 50 } } {
+ source ./include.tcl
+ set testfile txn006.db
+
+ puts "Txn006: Test dump/load in transaction environment"
+ env_cleanup $testdir
+
+ puts "\tTxn006.a: Create environment and database"
+ # Open/create the txn region
+ set e [berkdb_env -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create database
+ set db [berkdb_open -auto_commit -env $e \
+ -create -btree -dup $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Start a transaction
+ set txn [$e txn]
+ error_check_good txn [is_valid_txn $txn $e] TRUE
+
+ puts "\tTxn006.b: Put data"
+ # Put some data
+ for { set i 1 } { $i < $iter } { incr i } {
+ error_check_good put [$db put -txn $txn key$i data$i] 0
+ }
+
+ # End transaction, close db
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+
+ puts "\tTxn006.c: dump/load"
+ # Dump and load
+ exec $util_path/db_dump -p -h $testdir $testfile | \
+ $util_path/db_load -h $testdir $testfile
+}
diff --git a/db-4.8.30/test/txn007.tcl b/db-4.8.30/test/txn007.tcl
new file mode 100644
index 0000000..9f68f50
--- /dev/null
+++ b/db-4.8.30/test/txn007.tcl
@@ -0,0 +1,56 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+#TEST txn007
+#TEST Test of DB_TXN_WRITE_NOSYNC
+proc txn007 { { iter 50 } } {
+ source ./include.tcl
+ set testfile txn007.db
+
+ puts "Txn007: DB_TXN_WRITE_NOSYNC"
+ env_cleanup $testdir
+
+ # Open/create the txn region
+ puts "\tTxn007.a: Create env and database with -wrnosync"
+ set e [berkdb_env -create -home $testdir -txn -wrnosync]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create database
+ set db [berkdb open -auto_commit -env $e \
+ -create -btree -dup $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Put some data
+ puts "\tTxn007.b: Put $iter data items in individual transactions"
+ for { set i 1 } { $i < $iter } { incr i } {
+ # Start a transaction
+ set txn [$e txn]
+ error_check_good txn [is_valid_txn $txn $e] TRUE
+ $db put -txn $txn key$i data$i
+ error_check_good txn_commit [$txn commit] 0
+ }
+ set stat [$e log_stat]
+ puts "\tTxn007.c: Check log stats"
+ foreach i $stat {
+ set txt [lindex $i 0]
+ if { [string equal $txt {Times log written}] == 1 } {
+ set wrval [lindex $i 1]
+ }
+ if { [string equal $txt {Times log flushed to disk}] == 1 } {
+ set syncval [lindex $i 1]
+ }
+ }
+ error_check_good wrval [expr $wrval >= $iter] 1
+ #
+ # We should have written at least 'iter' number of times,
+ # but not synced on any of those.
+ #
+ set val [expr $wrval - $iter]
+ error_check_good syncval [expr $syncval <= $val] 1
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+}
diff --git a/db-4.8.30/test/txn008.tcl b/db-4.8.30/test/txn008.tcl
new file mode 100644
index 0000000..d99a494
--- /dev/null
+++ b/db-4.8.30/test/txn008.tcl
@@ -0,0 +1,30 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST txn008
+# TEST Test of wraparound txnids (txn002)
+proc txn008 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn008.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn002 "008.1"
+ puts "\tTxn008.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn002 "008.2"
+
+ puts "\tTxn008.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
diff --git a/db-4.8.30/test/txn009.tcl b/db-4.8.30/test/txn009.tcl
new file mode 100644
index 0000000..e1884c9
--- /dev/null
+++ b/db-4.8.30/test/txn009.tcl
@@ -0,0 +1,30 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST txn009
+# TEST Test of wraparound txnids (txn003)
+proc txn009 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn009.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn003 "009.1"
+ puts "\tTxn009.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn003 "009.2"
+
+ puts "\tTxn009.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
diff --git a/db-4.8.30/test/txn010.tcl b/db-4.8.30/test/txn010.tcl
new file mode 100644
index 0000000..f9b1d59
--- /dev/null
+++ b/db-4.8.30/test/txn010.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST txn010
+# TEST Test DB_ENV->txn_checkpoint arguments/flags
+proc txn010 { } {
+ source ./include.tcl
+
+ puts "Txn010: test DB_ENV->txn_checkpoint arguments/flags."
+ env_cleanup $testdir
+
+ # Open an environment and database.
+ puts "\tTxn010.a: open the environment and a database, checkpoint."
+ set env [berkdb_env -create -home $testdir -txn]
+ error_check_good envopen [is_valid_env $env] TRUE
+ set db [berkdb_open \
+ -env $env -create -mode 0644 -btree -auto_commit a.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Insert some data and do a checkpoint.
+ for { set count 0 } { $count < 100 } { incr count } {
+ set t [$env txn]
+ error_check_good "init: put" \
+ [$db put -txn $t "key_a_$count" "data"] 0
+ error_check_good "init: commit" [$t commit] 0
+ }
+ tclsleep 1
+ error_check_good checkpoint [$env txn_checkpoint] 0
+
+ # Test that checkpoint calls are ignored in quiescent systems.
+ puts "\tTxn010.b: test for checkpoints when system is quiescent"
+ set chkpt [txn010_stat $env "Time of last checkpoint"]
+ for { set count 0 } { $count < 5 } {incr count } {
+ tclsleep 1
+ error_check_good checkpoint [$env txn_checkpoint] 0
+ set test_chkpt [txn010_stat $env "Time of last checkpoint"]
+ error_check_good "quiescent: checkpoint time changed" \
+ [expr $test_chkpt == $chkpt] 1
+ }
+
+ # Add a single record, and test that checkpoint does something.
+ set chkpt [txn010_stat $env "Time of last checkpoint"]
+ set t [$env txn]
+ error_check_good \
+ "quiescent: put" [$db put -txn $t "key_b_$count" "data"] 0
+ error_check_good "quiescent: commit" [$t commit] 0
+ tclsleep 1
+ error_check_good checkpoint [$env txn_checkpoint] 0
+ set test_chkpt [txn010_stat $env "Time of last checkpoint"]
+ error_check_good "quiescent: checkpoint time unchanged" \
+ [expr $test_chkpt > $chkpt] 1
+
+ # Test that -force causes a checkpoint.
+ puts "\tTxn010.c: test checkpoint -force"
+ set chkpt [txn010_stat $env "Time of last checkpoint"]
+ for { set count 0 } { $count < 5 } {incr count } {
+ tclsleep 1
+ error_check_good checkpoint [$env txn_checkpoint -force] 0
+ set test_chkpt [txn010_stat $env "Time of last checkpoint"]
+ error_check_good "force: checkpoint time unchanged" \
+ [expr $test_chkpt > $chkpt] 1
+ set chkpt $test_chkpt
+ }
+
+ # Test that -kbyte doesn't cause a checkpoint unless there's
+ # enough activity.
+ puts "\tTxn010.d: test checkpoint -kbyte"
+
+ # Put in lots of data, and verify that -kbyte causes a checkpoint
+ for { set count 0 } { $count < 1000 } { incr count } {
+ set t [$env txn]
+ error_check_good "kbyte: put" \
+ [$db put -txn $t "key_c_$count" "data"] 0
+ error_check_good "kbyte: commit" [$t commit] 0
+ }
+
+ set chkpt [txn010_stat $env "Time of last checkpoint"]
+ tclsleep 1
+ error_check_good checkpoint [$env txn_checkpoint -kbyte 2] 0
+ set test_chkpt [txn010_stat $env "Time of last checkpoint"]
+ error_check_good "kbytes: checkpoint time unchanged" \
+ [expr $test_chkpt > $chkpt] 1
+
+ # Put in a little data and verify that -kbyte doesn't cause a
+ # checkpoint
+ set chkpt [txn010_stat $env "Time of last checkpoint"]
+ for { set count 0 } { $count < 20 } { incr count } {
+ set t [$env txn]
+ error_check_good "kbyte: put" \
+ [$db put -txn $t "key_d_$count" "data"] 0
+ error_check_good "kbyte: commit" [$t commit] 0
+ tclsleep 1
+ error_check_good checkpoint [$env txn_checkpoint -kbyte 20] 0
+ set test_chkpt [txn010_stat $env "Time of last checkpoint"]
+ error_check_good "kbytes: checkpoint time changed" \
+ [expr $test_chkpt == $chkpt] 1
+ }
+
+ # Test that -min doesn't cause a checkpoint unless enough time has
+ # passed.
+ puts "\tTxn010.e: test checkpoint -min"
+ set t [$env txn]
+ error_check_good "min: put" [$db put -txn $t "key_e_$count" "data"] 0
+ error_check_good "min: commit" [$t commit] 0
+ set chkpt [txn010_stat $env "Time of last checkpoint"]
+ for { set count 0 } { $count < 5 } {incr count } {
+ tclsleep 1
+ error_check_good checkpoint [$env txn_checkpoint -min 2] 0
+ set test_chkpt [txn010_stat $env "Time of last checkpoint"]
+ error_check_good "min: checkpoint time changed" \
+ [expr $test_chkpt == $chkpt] 1
+ }
+
+ # Wait long enough, and then check to see if -min causes a checkpoint.
+ set chkpt [txn010_stat $env "Time of last checkpoint"]
+ tclsleep 120
+ error_check_good checkpoint [$env txn_checkpoint -min 2] 0
+ set test_chkpt [txn010_stat $env "Time of last checkpoint"]
+ error_check_good "min: checkpoint time unchanged" \
+ [expr $test_chkpt > $chkpt] 1
+
+ # Close down the database and the environment.
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+}
+
+# txn010_stat --
+# Return the current log statistics.
+proc txn010_stat { env s } {
+ set stat [$env txn_stat]
+ foreach statpair $stat {
+ set statmsg [lindex $statpair 0]
+ set statval [lindex $statpair 1]
+ if {[is_substr $statmsg $s] != 0} {
+ return $statval
+ }
+ }
+ puts "FAIL: Txn010: stat string $s not found"
+ return 0
+}
diff --git a/db-4.8.30/test/txn011.tcl b/db-4.8.30/test/txn011.tcl
new file mode 100644
index 0000000..139d130
--- /dev/null
+++ b/db-4.8.30/test/txn011.tcl
@@ -0,0 +1,224 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2003-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST txn011
+# TEST Test durable and non-durable txns.
+# TEST Test a mixed env (with both durable and non-durable
+# TEST dbs), then a purely non-durable env. Make sure commit
+# TEST and abort work, and that only the log records we
+# TEST expect are written.
+# TEST Test that we can't get a durable handle on an open ND
+# TEST database, or vice versa. Test that all subdb's
+# TEST must be of the same type (D or ND).
+proc txn011 { {ntxns 100} } {
+ source ./include.tcl
+ global util_path
+
+ foreach envtype { "" "-private" } {
+ puts "Txn011: Non-durable txns ($envtype)."
+ env_cleanup $testdir
+
+ puts "\tTxn011.a: Persistent env recovery with -log_inmemory"
+ set lbuf [expr 8 * [expr 1024 * 1024]]
+ set env_cmd "berkdb_env -create \
+ -home $testdir -txn -log_inmemory -log_buffer $lbuf"
+ set ndenv [eval $env_cmd $envtype]
+ set db [berkdb_open -create -auto_commit \
+ -btree -env $ndenv -notdurable test.db]
+ check_log_records $testdir
+ error_check_good db_close [$db close] 0
+ error_check_good ndenv_close [$ndenv close] 0
+
+ # Run recovery with -e to retain environment.
+ set stat [catch {exec $util_path/db_recover -e -h $testdir} ret]
+		error_check_good db_recover $stat 0
+
+ # Rejoin env and make sure that the db is still there.
+ set ndenv [berkdb_env -home $testdir]
+ set db [berkdb_open -auto_commit -env $ndenv test.db]
+ error_check_good db_close [$db close] 0
+ error_check_good ndenv_close [$ndenv close] 0
+ env_cleanup $testdir
+
+ # Start with a new env for the next test.
+ set ndenv [eval $env_cmd]
+ error_check_good env_open [is_valid_env $ndenv] TRUE
+
+ # Open/create the database.
+ set testfile notdurable.db
+ set db [eval berkdb_open -create \
+ -auto_commit -env $ndenv -notdurable -btree $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTxn011.b: Abort txns in in-memory logging env."
+ txn011_runtxns $ntxns $db $ndenv abort
+ # Make sure there is nothing in the db.
+ txn011_check_empty $db $ndenv
+
+ puts "\tTxn011.c: Commit txns in in-memory logging env."
+ txn011_runtxns $ntxns $db $ndenv commit
+
+ # Make sure we haven't written any inappropriate log records
+ check_log_records $testdir
+
+ # Clean up non-durable env tests.
+ error_check_good db_close [$db close] 0
+ error_check_good ndenv_close [$ndenv close] 0
+ env_cleanup $testdir
+
+ puts "\tTxn011.d: Set up mixed durable/non-durable test."
+ # Open/create the mixed environment
+ set mixed_env_cmd "berkdb_env_noerr -create \
+ -home $testdir -txn -log_inmemory -log_buffer $lbuf"
+ set env [eval $mixed_env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
+ check_log_records $testdir
+
+ # Open/create the non-durable database
+ set nondurfile nondurable.db
+ set ndb [berkdb_open_noerr -create\
+ -auto_commit -env $env -btree -notdurable $nondurfile]
+ error_check_good dbopen [is_valid_db $ndb] TRUE
+ check_log_records $testdir
+
+ puts "\tTxn011.e: Abort txns in non-durable db."
+ txn011_runtxns $ntxns $ndb $env abort
+ # Make sure there is nothing in the db.
+ txn011_check_empty $ndb $env
+ check_log_records $testdir
+
+ puts "\tTxn011.f: Commit txns in non-durable db."
+ txn011_runtxns $ntxns $ndb $env commit
+ check_log_records $testdir
+
+ # Open/create the durable database
+ set durfile durable.db
+ set ddb [eval berkdb_open_noerr \
+ -create -auto_commit -env $env -btree $durfile]
+ error_check_good dbopen [is_valid_db $ddb] TRUE
+
+ # Try to get a not-durable handle on the durable db.
+ puts "\tTxn011.g: Try to get a not-durable handle on\
+ an open durable db."
+ set errormsg "Cannot open DURABLE and NOT DURABLE handles"
+ catch {berkdb_open_noerr \
+ -auto_commit -env $env -notdurable $durfile} res
+ error_check_good handle_error1 [is_substr $res $errormsg] 1
+ error_check_good ddb_close [$ddb close] 0
+
+ # Try to get a not-durable handle when reopening the durable
+ # db (this should work).
+ set db [berkdb_open_noerr \
+ -auto_commit -env $env -notdurable $durfile]
+ error_check_good db_reopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ # Now reopen as durable for the remainder of the test.
+ set ddb [berkdb_open_noerr \
+ -auto_commit -env $env -btree $durfile]
+ error_check_good dbopen [is_valid_db $ddb] TRUE
+
+ puts "\tTxn011.h: Abort txns in durable db."
+ # Add items to db in several txns but abort every one.
+ txn011_runtxns $ntxns $ddb $env abort
+ # Make sure there is nothing in the db.
+ txn011_check_empty $ddb $env
+
+ puts "\tTxn011.i: Commit txns in durable db."
+ txn011_runtxns $ntxns $ddb $env commit
+
+ puts "\tTxn011.j: Subdbs must all be durable or all not durable."
+ # Ask for -notdurable on durable db/subdb
+ set sdb1 [eval berkdb_open_noerr -create -auto_commit \
+ -env $env -btree testfile1.db subdb1]
+ catch {set sdb2 [eval berkdb_open_noerr -create -auto_commit \
+ -env $env -btree -notdurable testfile1.db subdb2]} res
+ error_check_good same_type_subdb1 [is_substr $res $errormsg] 1
+ error_check_good sdb1_close [$sdb1 close] 0
+
+ # Ask for durable on notdurable db/subdb
+ set sdb3 [eval berkdb_open_noerr -create -auto_commit \
+ -env $env -btree -notdurable testfile2.db subdb3]
+ catch {set sdb4 [eval berkdb_open_noerr -create -auto_commit \
+ -env $env -btree testfile2.db subdb4]} res
+ error_check_good same_type_subdb2 [is_substr $res $errormsg] 1
+ error_check_good sdb3_close [$sdb3 close] 0
+
+ puts "\tTxn011.k: Try to get a durable handle on a\
+ not-durable db."
+ # Try to get a durable handle on a not-durable database,
+ # while open. This should fail, but getting a durable handle
+ # when re-opening should work.
+ catch {berkdb_open_noerr -auto_commit -env $env $nondurfile} res
+ error_check_good handle_error [is_substr $res $errormsg] 1
+ error_check_good ndb_close [$ndb close] 0
+
+ set ndb [berkdb_open_noerr -auto_commit -env $env $nondurfile]
+ error_check_good ndb_reopen [is_valid_db $ndb] TRUE
+ error_check_good ndb_close [$ndb close] 0
+
+ # Clean up mixed env.
+ error_check_good ddb_close [$ddb close] 0
+ error_check_good env_close [$env close] 0
+ }
+}
+
+proc txn011_runtxns { ntxns db env end } {
+ source ./include.tcl
+
+ set did [open $dict]
+ set i 0
+ while { [gets $did str] != -1 && $i < $ntxns } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ error_check_good db_put_txn [$db put -txn $txn $i $str] 0
+ error_check_good txn_$end [$txn $end] 0
+ incr i
+ }
+ close $did
+}
+
+# Verify that a database is empty
+proc txn011_check_empty { db env } {
+ # Start a transaction
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+
+ # If a cursor get -first returns nothing, the db is empty.
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+ set ret [$dbc get -first]
+ error_check_good get_on_empty [string length $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+
+ # End transaction
+ error_check_good txn [$t commit] 0
+}
+
+# Some log records are still produced when we create a non-durable
+# db in a regular env.  Just make sure we don't see any unexpected
+# types.
+proc check_log_records { dir } {
+ global util_path
+
+ set tmpfile $dir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $dir > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+
+ set f [open $tmpfile r]
+ while { [gets $f record] >= 0 } {
+ set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
+ if { $r == 1 && [string match *_debug $name] != 1 && \
+ [string match __txn_regop $name] != 1 && \
+ [string match __txn_child $name] != 1 } {
+ puts "FAIL: unexpected log record $name found"
+ }
+ }
+ close $f
+ fileremove $tmpfile
+}
diff --git a/db-4.8.30/test/txn012.tcl b/db-4.8.30/test/txn012.tcl
new file mode 100644
index 0000000..64da3a0
--- /dev/null
+++ b/db-4.8.30/test/txn012.tcl
@@ -0,0 +1,61 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST txn012
+# TEST Test txn->getname and txn->setname.
+
+proc txn012 { {ntxns 100} } {
+ source ./include.tcl
+ global util_path
+
+ puts "Txn012: Test txn->setname and txn->getname."
+ env_cleanup $testdir
+ set txnname "this is a short txn name"
+ set longtxnname "transaction names longer than 50 characters will be truncated"
+
+ puts "\tTxn012.a: Set up env and txn."
+ set env [berkdb_env -create -home $testdir -txn]
+ set db [berkdb_open -create -auto_commit -btree -env $env test.db]
+ set txn0 [$env txn]
+ set txn1 [$env txn]
+
+ # Name the transactions, check the name.
+ error_check_good name_txn0 [$txn0 setname $txnname] 0
+ set getname [$txn0 getname]
+ error_check_good txnname $getname $txnname
+
+ error_check_good longname_txn [$txn1 setname $longtxnname] 0
+ set getlongname [$txn1 getname]
+ error_check_good longtxnname $getlongname $longtxnname
+
+ # Run db_stat. The long txn name will be truncated.
+ set stat [exec $util_path/db_stat -h $testdir -t]
+ error_check_good stat_name [is_substr $stat $txnname] 1
+ error_check_good stat_longname [is_substr $stat $longtxnname] 0
+ set truncname [string range $longtxnname 0 49]
+ error_check_good stat_truncname [is_substr $stat $truncname] 1
+
+ # Start another process and make sure it can see the names too.
+ puts "\tTxn012.b: Fork child process."
+ set pid [exec $tclsh_path $test_path/wrap.tcl txn012script.tcl \
+ $testdir/txn012script.log $testdir $txnname $longtxnname &]
+
+ watch_procs $pid 1
+
+ error_check_good txn0_commit [$txn0 commit] 0
+ error_check_good txn1_commit [$txn1 commit] 0
+
+ # Check for errors in child log file.
+ set errstrings [eval findfail $testdir/txn012script.log]
+ foreach str $errstrings {
+ puts "FAIL: error message in log file: $str"
+ }
+
+ # Clean up.
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+}
+
diff --git a/db-4.8.30/test/txn012script.tcl b/db-4.8.30/test/txn012script.tcl
new file mode 100644
index 0000000..8c935e4
--- /dev/null
+++ b/db-4.8.30/test/txn012script.tcl
@@ -0,0 +1,33 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Script to check that txn names can be seen across processes.
+# Names over 50 characters will be truncated.
+#
+# Usage: txn012script dir txnname longtxnname
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "txn012script dir txnname longtxnname"
+
+# Verify usage
+if { $argc != 3 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [ lindex $argv 0 ]
+set txnname [ lindex $argv 1 ]
+set longtxnname [ lindex $argv 2 ]
+
+# Run db_stat to view txn names.
+set stat [exec $util_path/db_stat -h $dir -t]
+error_check_good txnname [is_substr $stat $txnname] 1
+error_check_good longtxnname [is_substr $stat $longtxnname] 0
+set truncname [string range $longtxnname 0 49]
+error_check_good truncname [is_substr $stat $truncname] 1
diff --git a/db-4.8.30/test/txn013.tcl b/db-4.8.30/test/txn013.tcl
new file mode 100644
index 0000000..1ac998c
--- /dev/null
+++ b/db-4.8.30/test/txn013.tcl
@@ -0,0 +1,76 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST txn013
+# TEST Test of txns used in the wrong environment.
+# TEST Set up two envs. Start a txn in one env, and attempt to use it
+# TEST in the other env. Verify we get the appropriate error message.
+proc txn013 { } {
+ source ./include.tcl
+
+ set tnum "013"
+ puts "Txn$tnum: Test use of txns in wrong environment."
+ set testfile FILE.db
+ set key KEY
+ set data DATA
+
+ env_cleanup $testdir
+
+ puts "\tTxn$tnum.a: Create two environments."
+ set env1 [berkdb_env_noerr -create -mode 0644 -txn -home $testdir]
+ file mkdir $testdir/SUBDIR
+ set env2 \
+ [berkdb_env_noerr -create -mode 0644 -txn -home $testdir/SUBDIR]
+ error_check_good env1 [is_valid_env $env1] TRUE
+ error_check_good env2 [is_valid_env $env2] TRUE
+
+ # Open a database in each environment.
+ puts "\tTxn$tnum.b: Open a database in each environment."
+ set db1 [berkdb_open_noerr \
+ -env $env1 -create -auto_commit -btree $testfile]
+ set db2 [berkdb_open_noerr \
+ -env $env2 -create -auto_commit -btree $testfile]
+
+ # Create txns in both environments.
+ puts "\tTxn$tnum.c: Start a transaction in each environment."
+ set txn1 [$env1 txn]
+ set txn2 [$env2 txn]
+ error_check_good txn1_begin [is_valid_txn $txn1 $env1] TRUE
+ error_check_good txn2_begin [is_valid_txn $txn2 $env2] TRUE
+
+ # First do the puts in the correct envs, so we have something
+ # for the gets and deletes.
+ error_check_good txn1_env1 [$db1 put -txn $txn1 $key $data] 0
+ error_check_good txn2_env2 [$db2 put -txn $txn2 $key $data] 0
+
+ puts "\tTxn$tnum.d: Execute db put in wrong environment."
+ set errormsg "from different environments"
+ catch {$db1 put -txn $txn2 $key $data} res
+ error_check_good put_env1txn2 [is_substr $res $errormsg] 1
+ catch {$db2 put -txn $txn1 $key $data} res
+ error_check_good put_env2txn1 [is_substr $res $errormsg] 1
+
+ puts "\tTxn$tnum.e: Execute db get in wrong environment."
+ catch {$db1 get -txn $txn2 $key} res
+ error_check_good get_env1txn2 [is_substr $res $errormsg] 1
+ catch {$db2 get -txn $txn1 $key} res
+ error_check_good get_env2txn1 [is_substr $res $errormsg] 1
+
+ puts "\tTxn$tnum.f: Execute db del in wrong environment."
+ catch {$db1 del -txn $txn2 $key} res
+ error_check_good get_env1txn2 [is_substr $res $errormsg] 1
+ catch {$db2 del -txn $txn1 $key} res
+ error_check_good get_env2txn1 [is_substr $res $errormsg] 1
+
+ # Clean up.
+ error_check_good txn1_commit [$txn1 commit] 0
+ error_check_good txn2_commit [$txn2 commit] 0
+ error_check_good db1_close [$db1 close] 0
+ error_check_good db2_close [$db2 close] 0
+ error_check_good env1_close [$env1 close] 0
+ error_check_good env2_close [$env2 close] 0
+}
+
diff --git a/db-4.8.30/test/txn014.tcl b/db-4.8.30/test/txn014.tcl
new file mode 100644
index 0000000..5180320
--- /dev/null
+++ b/db-4.8.30/test/txn014.tcl
@@ -0,0 +1,158 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2005-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# TEST txn014
+# TEST Test of parent and child txns working on the same database.
+# TEST A txn that will become a parent creates a database.
+# TEST A txn that will not become a parent creates another database.
+# TEST Start a child txn of the 1st txn.
+# TEST Verify that the parent txn is disabled while child is open.
+# TEST 1. Child reads contents with child handle (should succeed).
+# TEST 2. Child reads contents with parent handle (should succeed).
+# TEST Verify that the non-parent txn can read from its database,
+# TEST and that the child txn cannot.
+# TEST Return to the child txn.
+# TEST 3. Child writes with child handle (should succeed).
+# TEST 4. Child writes with parent handle (should succeed).
+# TEST
+# TEST Commit the child, verify that the parent can write again.
+# TEST Check contents of database with a second child.
+proc txn014 { } {
+ source ./include.tcl
+ global default_pagesize
+
+ set page_size $default_pagesize
+ # If the page size is very small, we increase page size,
+	# If the page size is very small, increase it so we don't
+	# run out of lockers.
+ set page_size 2048
+ }
+ set tnum "014"
+ puts "Txn$tnum: Test use of parent and child txns."
+ set parentfile test$tnum.db
+ set nonparentfile test$tnum.db.2
+ set method "-btree"
+
+	# Use 5000 entries so there will be new items in the wordlist
+	# when we double nentries later in the test.
+ set nentries 5000
+
+ env_cleanup $testdir
+
+ puts "\tTxn$tnum.a: Create environment."
+ set eflags "-create -mode 0644 -txn -home $testdir"
+ set env [eval {berkdb_env_noerr} $eflags]
+ error_check_good env [is_valid_env $env] TRUE
+
+ # Open a database with parent txn and populate. We populate
+ # before starting up the child txn, because the only allowed
+ # Berkeley DB calls for a parent txn are beginning child txns,
+ # committing, or aborting.
+
+ puts "\tTxn$tnum.b: Start parent txn and open database."
+ set parent [$env txn]
+ error_check_good parent_begin [is_valid_txn $parent $env] TRUE
+ set db [berkdb_open_noerr -pagesize $page_size \
+ -env $env -txn $parent -create $method $parentfile]
+ populate $db $method $parent $nentries 0 0
+
+ puts "\tTxn$tnum.c: Start non-parent txn and open database."
+ set nonparent [$env txn]
+ error_check_good nonparent_begin [is_valid_txn $nonparent $env] TRUE
+ set db2 [berkdb_open_noerr -pagesize $page_size \
+ -env $env -txn $nonparent -create $method $nonparentfile]
+ populate $db2 $method $nonparent $nentries 0 0
+
+ # Start child txn and open database. Parent txn is not yet
+ # committed, but the child should be able to read what's there.
+ # The child txn should also be able to use the parent's handle.
+
+ puts "\tTxn$tnum.d: Start child txn."
+ set child [$env txn -parent $parent]
+
+ puts "\tTxn$tnum.e: Verify parent is disabled."
+ catch {$db put -txn $parent a a} ret
+ error_check_good \
+ parent_disabled [is_substr $ret "Child transaction is active"] 1
+
+ puts "\tTxn$tnum.f: Get a handle on parent's database using child txn."
+ set childdb [berkdb_open_noerr -pagesize $page_size \
+ -env $env -txn $child $method $parentfile]
+
+ puts "\tTxn$tnum.g: Read database with child txn/child handle,"
+ puts "\tTxn$tnum.g: and with child txn/parent handle."
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+
+ # First use child's handle.
+ set ret [$childdb get -txn $child $key]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+
+ # Have the child use the parent's handle.
+ set ret [$db get -txn $child $key]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+
+ # Read the last key from the non-parent database, then try
+ # to read the same key using the child txn. It will fail.
+ puts "\tTxn$tnum.h: Child cannot read data from non-parent."
+ set ret [$db2 get -txn $nonparent $key]
+
+ # Check the return against $key, because $str has gone on to
+ # the next item in the wordlist.
+ error_check_good \
+ np_get $ret [list [list $key [pad_data $method $key]]]
+ catch {$db2 get -txn $child $key} ret
+ error_check_good \
+ child_np_get [is_substr $ret "is still active"] 1
+
+ # The child should also be able to update the database, using
+ # either handle.
+ puts "\tTxn$tnum.i: Write to database with child txn & child handle."
+ populate $childdb $method $child $nentries 0 0
+ puts "\tTxn$tnum.j: Write to database with child txn & parent handle."
+ populate $db $method $child $nentries 0 0
+
+ puts "\tTxn$tnum.k: Commit child, freeing parent."
+ error_check_good child_commit [$child commit] 0
+ error_check_good childdb_close [$childdb close] 0
+
+ puts "\tTxn$tnum.l: Add more entries to db using parent txn."
+ set nentries [expr $nentries * 2]
+ populate $db $method $parent $nentries 0 0
+
+ puts "\tTxn$tnum.m: Start new child txn and read database."
+ set child2 [$env txn -parent $parent]
+ set child2db [berkdb_open_noerr -pagesize $page_size \
+ -env $env -txn $child2 $method $parentfile]
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set ret [$child2db get -txn $child2 $key]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]] 1
+ incr count
+ }
+ close $did
+
+ puts "\tTxn$tnum.n: Clean up."
+ error_check_good child2_commit [$child2 commit] 0
+ error_check_good nonparent_commit [$nonparent commit] 0
+ error_check_good parent_commit [$parent commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good db2_close [$db2 close] 0
+ error_check_good childdb_close [$child2db close] 0
+ error_check_good env_close [$env close] 0
+}
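+
+# The proc below is a hypothetical, standalone sketch (it is not
+# called by the test suite) of the nesting rule txn014 exercises:
+# while a child txn is open, the parent handle may only begin more
+# children, commit, or abort, so a put with the parent txn fails
+# until the child resolves. The directory ./TESTDIR.sketch and the
+# file sketch.db are illustrative names only.
+proc txn014_sketch { } {
+ file mkdir ./TESTDIR.sketch
+ set env [berkdb_env -create -txn -home ./TESTDIR.sketch]
+ set parent [$env txn]
+ set db [berkdb_open_noerr \
+ -env $env -txn $parent -create -btree sketch.db]
+ set child [$env txn -parent $parent]
+ # The parent is disabled while the child is active.
+ catch {$db put -txn $parent a a} ret
+ puts "Put with parent while child is active: $ret"
+ # The child may write, even through the parent-opened handle.
+ error_check_good child_put [$db put -txn $child b b] 0
+ error_check_good child_commit [$child commit] 0
+ # With the child resolved, the parent may write and commit.
+ error_check_good parent_put [$db put -txn $parent c c] 0
+ error_check_good parent_commit [$parent commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+}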
+
diff --git a/db-4.8.30/test/txnscript.tcl b/db-4.8.30/test/txnscript.tcl
new file mode 100644
index 0000000..4d04365
--- /dev/null
+++ b/db-4.8.30/test/txnscript.tcl
@@ -0,0 +1,66 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Txn003 script - outstanding child prepare script
+# Usage: txnscript envcmd dbfile gidfile key data
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# key: key to use
+# data: new data to use
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "txnscript envcmd dbfile gidfile key data"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set envcmd [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set gidfile [ lindex $argv 2 ]
+set key [ lindex $argv 3 ]
+set data [ lindex $argv 4 ]
+
+set dbenv [eval $envcmd]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+set usedb 1
+set db [berkdb_open -auto_commit -env $dbenv $dbfile]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+puts "\tTxnscript.a: begin parent and child txn"
+set parent [$dbenv txn]
+error_check_good parent [is_valid_txn $parent $dbenv] TRUE
+set child [$dbenv txn -parent $parent]
+error_check_good child [is_valid_txn $child $dbenv] TRUE
+
+puts "\tTxnscript.b: Modify data"
+error_check_good db_put [$db put -txn $child $key $data] 0
+
+set gfd [open $gidfile w+]
+set gid [make_gid txnscript:$parent]
+puts $gfd $gid
+puts "\tTxnscript.c: Prepare parent only"
+error_check_good txn_prepare:$parent [$parent prepare $gid] 0
+close $gfd
+
+puts "\tTxnscript.d: Check child handle"
+set stat [catch {$child abort} ret]
+error_check_good child_handle $stat 1
+error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+
+#
+# We do not close the db or env, but exit with the txns outstanding.
+#
+puts "\tTxnscript completed successfully"
+flush stdout
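+
+# Hypothetical sketch (not executed by this script) of how a caller
+# could later resolve the transaction left prepared above: open the
+# environment with recovery, list prepared txns with txn_recover, and
+# commit the handle whose global id matches the one saved in gidfile,
+# aborting any others. The proc name and envdir argument are
+# illustrative; it assumes txn_recover returns a list of
+# {txn-handle gid} pairs.
+proc txnscript_resolve_sketch { envdir gid } {
+ set env [berkdb_env -create -txn -recover -home $envdir]
+ foreach tpair [$env txn_recover] {
+ set txn [lindex $tpair 0]
+ if { [lindex $tpair 1] == $gid } {
+ error_check_good txn_commit [$txn commit] 0
+ } else {
+ error_check_good txn_abort [$txn abort] 0
+ }
+ }
+ error_check_good env_close [$env close] 0
+}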
diff --git a/db-4.8.30/test/update.tcl b/db-4.8.30/test/update.tcl
new file mode 100644
index 0000000..8b90501
--- /dev/null
+++ b/db-4.8.30/test/update.tcl
@@ -0,0 +1,92 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+
+source ./include.tcl
+global update_dir
+set update_dir "$test_path/update_test"
+
+proc update { } {
+ source ./include.tcl
+ global update_dir
+
+ foreach version [glob $update_dir/*] {
+ regexp \[^\/\]*$ $version version
+ foreach method [glob $update_dir/$version/*] {
+ regexp \[^\/\]*$ $method method
+ foreach file [glob $update_dir/$version/$method/*] {
+ regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
+ foreach endianness {"le" "be"} {
+ puts "Update:\
+ $version $method $name $endianness"
+ set ret [catch {_update $update_dir $testdir $version $method $name $endianness 1 1} message]
+ if { $ret != 0 } {
+ puts $message
+ }
+ }
+ }
+ }
+ }
+}
+
+proc _update { source_dir temp_dir \
+ version method file endianness do_db_load_test do_update_test } {
+ source include.tcl
+ global errorInfo
+
+ cleanup $temp_dir NULL
+
+ exec sh -c \
+"gzcat $source_dir/$version/$method/$file.tar.gz | (cd $temp_dir && tar xf -)"
+
+ if { $do_db_load_test } {
+ set ret [catch \
+ {exec $util_path/db_load -f "$temp_dir/$file.dump" \
+ "$temp_dir/update.db"} message]
+ error_check_good \
+ "Update load: $version $method $file $message" $ret 0
+
+ set ret [catch \
+ {exec $util_path/db_dump -f "$temp_dir/update.dump" \
+ "$temp_dir/update.db"} message]
+ error_check_good \
+ "Update dump: $version $method $file $message" $ret 0
+
+ error_check_good "Update diff.1.1: $version $method $file" \
+ [filecmp "$temp_dir/$file.dump" "$temp_dir/update.dump"] 0
+ error_check_good \
+ "Update diff.1.2: $version $method $file" $ret ""
+ }
+
+ if { $do_update_test } {
+ set ret [catch \
+ {berkdb open -update "$temp_dir/$file-$endianness.db"} db]
+ if { $ret == 1 } {
+ if { ![is_substr $errorInfo "version upgrade"] } {
+ set fnl [string first "\n" $errorInfo]
+ set theError \
+ [string range $errorInfo 0 [expr $fnl - 1]]
+ error $theError
+ }
+ } else {
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ set ret [catch \
+ {exec $util_path/db_dump -f \
+ "$temp_dir/update.dump" \
+ "$temp_dir/$file-$endianness.db"} message]
+ error_check_good "Update\
+ dump: $version $method $file $message" $ret 0
+
+ error_check_good \
+ "Update diff.2: $version $method $file" \
+ [filecmp "$temp_dir/$file.dump" \
+ "$temp_dir/update.dump"] 0
+ error_check_good \
+ "Update diff.2: $version $method $file" $ret ""
+ }
+ }
+}
diff --git a/db-4.8.30/test/upgrade.tcl b/db-4.8.30/test/upgrade.tcl
new file mode 100644
index 0000000..96c5f1d
--- /dev/null
+++ b/db-4.8.30/test/upgrade.tcl
@@ -0,0 +1,855 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2009 Oracle. All rights reserved.
+#
+# $Id$
+
+source ./include.tcl
+
+global upgrade_dir
+# set upgrade_dir "$test_path/upgrade_test"
+set upgrade_dir "$test_path/upgrade/databases"
+
+global gen_upgrade
+set gen_upgrade 0
+global gen_dump
+set gen_dump 0
+global gen_chksum
+set gen_chksum 0
+global gen_upgrade_log
+set gen_upgrade_log 0
+
+global upgrade_dir
+global upgrade_be
+global upgrade_method
+global upgrade_name
+
+proc upgrade { { archived_test_loc "DEFAULT" } } {
+ source ./include.tcl
+ global test_names
+ global upgrade_dir
+ global tcl_platform
+ global saved_logvers
+
+ set saved_upgrade_dir $upgrade_dir
+
+ # Identify endianness of the machine running upgrade.
+ if { [big_endian] == 1 } {
+ set myendianness be
+ } else {
+ set myendianness le
+ }
+ set e $tcl_platform(byteOrder)
+
+ if { [file exists $archived_test_loc/logversion] == 1 } {
+ set fd [open $archived_test_loc/logversion r]
+ set saved_logvers [read $fd]
+ close $fd
+ } else {
+ puts "Old log version number must be available \
+ in $archived_test_loc/logversion"
+ return
+ }
+
+ fileremove -f UPGRADE.OUT
+ set o [open UPGRADE.OUT a]
+
+ puts -nonewline $o "Upgrade test started at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ puts $o [berkdb version -string]
+ puts $o "Testing $e files"
+
+ puts -nonewline "Upgrade test started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts [berkdb version -string]
+ puts "Testing $e files"
+
+ if { $archived_test_loc == "DEFAULT" } {
+ puts $o "Using default archived databases in $upgrade_dir."
+ puts "Using default archived databases in $upgrade_dir."
+ } else {
+ set upgrade_dir $archived_test_loc
+ puts $o "Using archived databases in $upgrade_dir."
+ puts "Using archived databases in $upgrade_dir."
+ }
+ close $o
+
+ foreach version [glob $upgrade_dir/*] {
+ if { [string first CVS $version] != -1 } { continue }
+ regexp \[^\/\]*$ $version version
+
+ # Test only files where the endianness of the db matches
+ # the endianness of the test platform. These are the
+ # meaningful tests:
+ # 1. File generated on le, tested on le
+ # 2. File generated on be, tested on be
+ # 3. Byte-swapped file generated on le, tested on be
+ # 4. Byte-swapped file generated on be, tested on le
+ #
+ set dbendianness [string range $version end-1 end]
+ if { [string compare $myendianness $dbendianness] != 0 } {
+ puts "Skipping test of $version \
+ on $myendianness platform."
+ } else {
+ set release [string trim $version -lbe]
+ set o [open UPGRADE.OUT a]
+ puts $o "Files created on release $release"
+ close $o
+ puts "Files created on release $release"
+
+ foreach method [glob $upgrade_dir/$version/*] {
+ regexp \[^\/\]*$ $method method
+ set o [open UPGRADE.OUT a]
+ puts $o "\nTesting $method files"
+ close $o
+ puts "\tTesting $method files"
+
+ foreach file [lsort -dictionary \
+ [glob -nocomplain \
+ $upgrade_dir/$version/$method/*]] {
+ regexp (\[^\/\]*)\.tar\.gz$ \
+ $file dummy name
+
+ cleanup $testdir NULL 1
+ set curdir [pwd]
+ cd $testdir
+ set tarfd [open "|tar xf -" w]
+ cd $curdir
+
+ catch {exec gunzip -c \
+ "$upgrade_dir/$version/$method/$name.tar.gz" \
+ >@$tarfd}
+ close $tarfd
+
+ set f [open $testdir/$name.tcldump \
+ {RDWR CREAT}]
+ close $f
+
+ # We exec a separate tclsh for each
+ # separate subtest to keep the
+ # testing process from consuming a
+ # tremendous amount of memory.
+ #
+ # First we test the .db files.
+ if { [file exists \
+ $testdir/$name-$myendianness.db] } {
+ if { [catch {exec $tclsh_path \
+ << "source \
+ $test_path/test.tcl;\
+ _upgrade_test $testdir \
+ $version $method $name \
+ $myendianness" >>& \
+ UPGRADE.OUT } message] } {
+ set o [open \
+ UPGRADE.OUT a]
+ puts $o "FAIL: $message"
+ close $o
+ }
+ if { [catch {exec $tclsh_path\
+ << "source \
+ $test_path/test.tcl;\
+ _db_load_test $testdir \
+ $version $method $name" >>&\
+ UPGRADE.OUT } message] } {
+ set o [open \
+ UPGRADE.OUT a]
+ puts $o "FAIL: $message"
+ close $o
+ }
+ }
+ # Then we test log files.
+ if { [file exists \
+ $testdir/$name.prlog] } {
+ if { [catch {exec $tclsh_path \
+ << "source \
+ $test_path/test.tcl;\
+ global saved_logvers;\
+ set saved_logvers \
+ $saved_logvers;\
+ _log_test $testdir \
+ $release $method \
+ $name" >>& \
+ UPGRADE.OUT } message] } {
+ set o [open \
+ UPGRADE.OUT a]
+ puts $o "FAIL: $message"
+ close $o
+ }
+ }
+
+ # Then we test any .dmp files. Move
+ # the saved file to the current working
+ # directory. Run the test locally.
+ # Compare the dumps; they should match.
+ if { [file exists $testdir/$name.dmp] } {
+ file rename -force \
+ $testdir/$name.dmp $name.dmp
+
+ foreach test $test_names(plat) {
+ eval $test $method
+ }
+
+ # Discard lines that can differ.
+ discardline $name.dmp \
+ TEMPFILE "db_pagesize="
+ file copy -force \
+ TEMPFILE $name.dmp
+ discardline $testdir/$test.dmp \
+ TEMPFILE "db_pagesize="
+ file copy -force \
+ TEMPFILE $testdir/$test.dmp
+
+ error_check_good compare_dump \
+ [filecmp $name.dmp \
+ $testdir/$test.dmp] 0
+
+ fileremove $name.dmp
+ }
+ }
+ }
+ }
+ }
+ set upgrade_dir $saved_upgrade_dir
+
+ set o [open UPGRADE.OUT a]
+ puts -nonewline $o "Completed at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ close $o
+
+ puts -nonewline "Completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+
+ # Don't provide a return value.
+ return
+}
+
+proc _upgrade_test { temp_dir version method file endianness } {
+ source include.tcl
+ global errorInfo
+ global passwd
+ global encrypt
+
+ puts "Upgrade: $version $method $file $endianness"
+
+ # Check whether we're working with an encrypted file.
+ if { [string match c-* $file] } {
+ set encrypt 1
+ }
+
+ # Open the database prior to upgrading. If it fails,
+ # it should fail with the DB_OLDVERSION message.
+ set encargs ""
+ set upgradeargs ""
+ if { $encrypt == 1 } {
+ set encargs " -encryptany $passwd "
+ set upgradeargs " -P $passwd "
+ }
+ if { [catch \
+ { set db [eval {berkdb open} $encargs \
+ $temp_dir/$file-$endianness.db] } res] } {
+ error_check_good old_version [is_substr $res DB_OLDVERSION] 1
+ } else {
+ error_check_good db_close [$db close] 0
+ }
+
+ # Now upgrade the database.
+ set ret [catch {eval exec {$util_path/db_upgrade} $upgradeargs \
+ "$temp_dir/$file-$endianness.db" } message]
+ error_check_good dbupgrade $ret 0
+
+ error_check_good dbupgrade_verify [verify_dir $temp_dir "" 0 0 1] 0
+
+ upgrade_dump "$temp_dir/$file-$endianness.db" "$temp_dir/temp.dump"
+
+ error_check_good "Upgrade diff.$endianness: $version $method $file" \
+ [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0
+}
+
+proc _db_load_test { temp_dir version method file } {
+ source include.tcl
+ global errorInfo
+
+ puts "Db_load: $version $method $file"
+
+ set ret [catch \
+ {exec $util_path/db_load -f "$temp_dir/$file.dump" \
+ "$temp_dir/upgrade.db"} message]
+ error_check_good \
+ "Upgrade load: $version $method $file $message" $ret 0
+
+ upgrade_dump "$temp_dir/upgrade.db" "$temp_dir/temp.dump"
+
+ error_check_good "Upgrade diff.1.1: $version $method $file" \
+ [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0
+}
+
+proc _log_test { temp_dir release method file } {
+ source ./include.tcl
+ global saved_logvers
+ global passwd
+ puts "Check log file: $temp_dir $release $method $file"
+
+ # Get log version number of current system
+ set env [berkdb_env -create -log -home $testdir]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+ set current_logvers [get_log_vers $env]
+ error_check_good env_close [$env close] 0
+ error_check_good env_remove [berkdb envremove -home $testdir] 0
+
+ # Rename recd001-x-log.000000000n to log.000000000n.
+ set logfiles [glob -nocomplain $temp_dir/*log.0*]
+ foreach logfile $logfiles {
+ set logname [string replace $logfile 0 \
+ [string last - $logfile]]
+ file rename -force $logfile $temp_dir/$logname
+ }
+
+ # Use db_printlog to dump the logs. If the current log file
+ # version is greater than the saved log file version, the log
+ # files are expected to be unreadable. If the log file is
+ # readable, check that the current printlog dump matches the
+ # archived printlog.
+ #
+ set ret [catch {exec $util_path/db_printlog -h $temp_dir \
+ > $temp_dir/logs.prlog} message]
+ if { [is_substr $message "magic number"] } {
+ # The failure is probably due to encryption, try
+ # crypto printlog.
+ set ret [catch {exec $util_path/db_printlog -h $temp_dir \
+ -P $passwd > $temp_dir/logs.prlog} message]
+ if { $ret == 1 } {
+ # If the failure is because of a historic
+ # log version, that's okay.
+ if { $current_logvers <= $saved_logvers } {
+ puts "db_printlog failed: $message"
+ }
+ }
+ }
+
+ # Log versions prior to 8 can only be read by their own version.
+ # Log versions of 8 or greater are readable by Berkeley DB 4.5
+ # or greater, but the output of printlog does not match unless
+ # the versions are identical.
+ #
+ # As of Berkeley DB 4.8, we'll only try to read back to log
+ # version 11, which came out with 4.4. Backwards compatibility
+ # now only extends back to 4.4 because of page changes.
+ #
+ set logoldver 11
+ if { $current_logvers > $saved_logvers &&\
+ $saved_logvers < $logoldver } {
+ error_check_good historic_log_version \
+ [is_substr $message "historic log version"] 1
+ } elseif { $current_logvers > $saved_logvers } {
+ error_check_good db_printlog:$message $ret 0
+ } elseif { $current_logvers == $saved_logvers } {
+ error_check_good db_printlog:$message $ret 0
+ # Compare logs.prlog and $file.prlog (should match)
+ error_check_good "Compare printlogs" [filecmp \
+ "$temp_dir/logs.prlog" "$temp_dir/$file.prlog"] 0
+ } elseif { $current_logvers < $saved_logvers } {
+ puts -nonewline "FAIL: current log version $current_logvers "
+ puts "cannot be less than saved log version $save_logvers."
+ }
+}
+
+proc gen_upgrade { dir { save_crypto 1 } { save_non_crypto 1 } } {
+ global gen_upgrade
+ global gen_upgrade_log
+ global gen_chksum
+ global gen_dump
+ global upgrade_dir
+ global upgrade_be
+ global upgrade_method
+ global upgrade_name
+ global valid_methods
+ global test_names
+ global parms
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ set upgrade_dir $dir
+ env_cleanup $testdir
+
+ fileremove -f GENERATE.OUT
+ set o [open GENERATE.OUT a]
+
+ puts -nonewline $o "Generating upgrade files. Started at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ puts $o [berkdb version -string]
+
+ puts -nonewline "Generating upgrade files. Started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts [berkdb version -string]
+
+ close $o
+
+ # Create a file that contains the log version number.
+ # If necessary, create the directory to contain the file.
+ set env [berkdb_env -create -log -home $testdir]
+ error_check_good is_valid_env [is_valid_env $env] TRUE
+
+ if { [file exists $dir] == 0 } {
+ file mkdir $dir
+ }
+ set lv [open $dir/logversion w]
+ puts $lv [get_log_vers $env]
+ close $lv
+
+ error_check_good env_close [$env close] 0
+
+ # Generate test databases for each access method and endianness.
+ foreach method $valid_methods {
+ set o [open GENERATE.OUT a]
+ puts $o "\nGenerating $method files"
+ close $o
+ puts "\tGenerating $method files"
+ set upgrade_method $method
+
+ # We piggyback testing of dumped sequence files on upgrade
+ # testing because this is the only place that we ship files
+ # from one machine to another. Create files for both
+ # endiannesses, because who knows what platform we'll
+ # be testing on.
+
+ set gen_dump 1
+ foreach test $test_names(plat) {
+ set upgrade_name $test
+ foreach upgrade_be { 0 1 } {
+ eval $test $method
+ cleanup $testdir NULL
+ }
+ }
+ set gen_dump 0
+
+#set test_names(test) ""
+ set gen_upgrade 1
+ foreach test $test_names(test) {
+ if { [info exists parms($test)] != 1 } {
+ continue
+ }
+
+ set o [open GENERATE.OUT a]
+ puts $o "\t\tGenerating files for $test"
+ close $o
+ puts "\t\tGenerating files for $test"
+
+ if { $save_non_crypto == 1 } {
+ set encrypt 0
+ foreach upgrade_be { 0 1 } {
+ set upgrade_name $test
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl;\
+ global gen_upgrade upgrade_be;\
+ global upgrade_method upgrade_name;\
+ global encrypt;\
+ set encrypt $encrypt;\
+ set gen_upgrade 1;\
+ set upgrade_be $upgrade_be;\
+ set upgrade_method $upgrade_method;\
+ set upgrade_name $upgrade_name;\
+ run_method -$method $test" \
+ >>& GENERATE.OUT} res] {
+ puts "FAIL: run_method \
+ $test $method"
+ }
+ cleanup $testdir NULL 1
+ }
+ # Save checksummed files for only one test.
+ # Checksumming should work in all or no cases.
+ set gen_chksum 1
+ foreach upgrade_be { 0 1 } {
+ set upgrade_name $test
+ if { $test == "test001" } {
+ if { [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl;\
+ global gen_upgrade;\
+ global upgrade_be;\
+ global upgrade_method;\
+ global upgrade_name;\
+ global encrypt gen_chksum;\
+ set encrypt $encrypt;\
+ set gen_upgrade 1;\
+ set gen_chksum 1;\
+ set upgrade_be $upgrade_be;\
+ set upgrade_method \
+ $upgrade_method;\
+ set upgrade_name \
+ $upgrade_name;\
+ run_method -$method $test \
+ 0 1 stdout -chksum" \
+ >>& GENERATE.OUT} res] } {
+ puts "FAIL: run_method \
+ $test $method \
+ -chksum: $res"
+ }
+ cleanup $testdir NULL 1
+ }
+ }
+ set gen_chksum 0
+ }
+ # Save encrypted db's only of native endianness.
+ # Encrypted files are not portable across endianness.
+ if { $save_crypto == 1 } {
+ set upgrade_be [big_endian]
+ set encrypt 1
+ set upgrade_name $test
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl;\
+ global gen_upgrade upgrade_be;\
+ global upgrade_method upgrade_name;\
+ global encrypt passwd;\
+ set encrypt $encrypt;\
+ set passwd $passwd;\
+ set gen_upgrade 1;\
+ set upgrade_be $upgrade_be;\
+ set upgrade_method $upgrade_method;\
+ set upgrade_name $upgrade_name;\
+ run_secmethod $method $test" \
+ >>& GENERATE.OUT} res] {
+ puts "FAIL: run_secmethod \
+ $test $method"
+ }
+ cleanup $testdir NULL 1
+ }
+ }
+ set gen_upgrade 0
+ }
+
+ # Set upgrade_be to the native value so log files go to the
+ # right place.
+ set upgrade_be [big_endian]
+
+ # Generate log files.
+ set o [open GENERATE.OUT a]
+ puts $o "\tGenerating log files"
+ close $o
+ puts "\tGenerating log files"
+
+ set gen_upgrade_log 1
+ # Pass the global variables and their values to the new tclsh.
+ if { $save_non_crypto == 1 } {
+ set encrypt 0
+ if [catch {exec $tclsh_path << "source $test_path/test.tcl;\
+ global gen_upgrade_log upgrade_be upgrade_dir;\
+ global encrypt;\
+ set encrypt $encrypt;\
+ set gen_upgrade_log $gen_upgrade_log; \
+ set upgrade_be $upgrade_be;\
+ set upgrade_dir $upgrade_dir;\
+ run_recds" >>& GENERATE.OUT} res] {
+ puts "FAIL: run_recds: $res"
+ }
+ }
+ if { $save_crypto == 1 } {
+ set encrypt 1
+ if [catch {exec $tclsh_path << "source $test_path/test.tcl;\
+ global gen_upgrade_log upgrade_be upgrade_dir;\
+ global encrypt;\
+ set encrypt $encrypt;\
+ set gen_upgrade_log $gen_upgrade_log; \
+ set upgrade_be $upgrade_be;\
+ set upgrade_dir $upgrade_dir;\
+ run_recds " >>& GENERATE.OUT} res] {
+ puts "FAIL: run_recds with crypto: $res"
+ }
+ }
+ set gen_upgrade_log 0
+
+ set o [open GENERATE.OUT a]
+ puts -nonewline $o "Completed at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ puts -nonewline "Completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ close $o
+}
+
+proc save_upgrade_files { dir } {
+ global upgrade_dir
+ global upgrade_be
+ global upgrade_method
+ global upgrade_name
+ global gen_upgrade
+ global gen_upgrade_log
+ global gen_dump
+ global encrypt
+ global gen_chksum
+ global passwd
+ source ./include.tcl
+
+ set vers [berkdb version]
+ set maj [lindex $vers 0]
+ set min [lindex $vers 1]
+
+ # Is this machine big or little endian? We want to mark
+ # the test directories appropriately, since testing
+ # little-endian databases generated by a big-endian machine,
+ # and/or vice versa, is interesting.
+ if { [big_endian] } {
+ set myendianness be
+ } else {
+ set myendianness le
+ }
+
+ if { $upgrade_be == 1 } {
+ set version_dir "$myendianness-$maj.${min}be"
+ set en be
+ } else {
+ set version_dir "$myendianness-$maj.${min}le"
+ set en le
+ }
+
+ set dest $upgrade_dir/$version_dir/$upgrade_method
+ exec mkdir -p $dest
+
+ if { $gen_upgrade == 1 } {
+ # Save db files from test001 - testxxx.
+ set dbfiles [glob -nocomplain $dir/*.db]
+ set dumpflag ""
+ # Encrypted files are identified by the prefix "c-".
+ if { $encrypt == 1 } {
+ set upgrade_name c-$upgrade_name
+ set dumpflag " -P $passwd "
+ }
+ # Checksummed files are identified by the prefix "s-".
+ if { $gen_chksum == 1 } {
+ set upgrade_name s-$upgrade_name
+ }
+ foreach dbfile $dbfiles {
+ set basename [string range $dbfile \
+ [expr [string length $dir] + 1] end-3]
+
+ set newbasename $upgrade_name-$basename
+
+ # db_dump file
+ if { [catch {eval exec $util_path/db_dump -k $dumpflag \
+ $dbfile > $dir/$newbasename.dump} res] } {
+ puts "FAIL: $res"
+ }
+
+ # tcl_dump file
+ upgrade_dump $dbfile $dir/$newbasename.tcldump
+
+ # Rename dbfile and any dbq files.
+ file rename $dbfile $dir/$newbasename-$en.db
+ foreach dbq \
+ [glob -nocomplain $dir/__dbq.$basename.db.*] {
+ set s [string length $dir/__dbq.]
+ set newname [string replace $dbq $s \
+ [expr [string length $basename] + $s - 1] \
+ $newbasename-$en]
+ file rename $dbq $newname
+ }
+ set cwd [pwd]
+ cd $dir
+ catch {eval exec tar -cvf $dest/$newbasename.tar \
+ [glob $newbasename* __dbq.$newbasename-$en.db.*]}
+ catch {exec gzip -9v $dest/$newbasename.tar} res
+ cd $cwd
+ }
+ }
+
+ if { $gen_upgrade_log == 1 } {
+ # Save log files from recd tests.
+ set logfiles [glob -nocomplain $dir/log.*]
+ if { [llength $logfiles] > 0 } {
+ # More than one log.0000000001 file may be produced
+ # per recd test, so we generate unique names:
+ # recd001-0-log.0000000001, recd001-1-log.0000000001,
+ # and so on.
+ # We may also have log.0000000001, log.0000000002,
+ # and so on, and they will all be dumped together
+ # by db_printlog.
+ set count 0
+ while { [file exists \
+ $dest/$upgrade_name-$count-log.tar.gz] \
+ == 1 } {
+ incr count
+ }
+ set newname $upgrade_name-$count-log
+
+ # Run db_printlog on all the log files
+ if {[catch {exec $util_path/db_printlog -h $dir > \
+ $dir/$newname.prlog} res] != 0} {
+ puts "Regular printlog failed, try encryption"
+ eval {exec $util_path/db_printlog} -h $dir \
+ -P $passwd > $dir/$newname.prlog
+ }
+
+ # Rename each log file so we can identify which
+ # recd test created it.
+ foreach logfile $logfiles {
+ set lognum [string range $logfile \
+ end-9 end]
+ file rename $logfile $dir/$newname.$lognum
+ }
+
+ set cwd [pwd]
+ cd $dir
+
+ catch {eval exec tar -cvf $dest/$newname.tar \
+ [glob $newname*]}
+ catch {exec gzip -9v $dest/$newname.tar}
+ cd $cwd
+ }
+ }
+
+ if { $gen_dump == 1 } {
+ # Save dump files. We require that the files have
+ # been created with the extension .dmp.
+ set dumpfiles [glob -nocomplain $dir/*.dmp]
+
+ foreach dumpfile $dumpfiles {
+ set basename [string range $dumpfile \
+ [expr [string length $dir] + 1] end-4]
+
+ set newbasename $upgrade_name-$basename
+
+ # Rename dumpfile.
+ file rename $dumpfile $dir/$newbasename.dmp
+
+ set cwd [pwd]
+ cd $dir
+ catch {eval exec tar -cvf $dest/$newbasename.tar \
+ [glob $newbasename.dmp]}
+ catch {exec gzip -9v $dest/$newbasename.tar} res
+ cd $cwd
+ }
+ }
+}
+
+proc upgrade_dump { database file {stripnulls 0} } {
+ global errorInfo
+ global encrypt
+ global passwd
+
+ set encargs ""
+ if { $encrypt == 1 } {
+ set encargs " -encryptany $passwd "
+ }
+ set db [eval {berkdb open} -rdonly $encargs $database]
+ set dbc [$db cursor]
+
+ set f [open $file w+]
+ fconfigure $f -encoding binary -translation binary
+
+ #
+ # Get a sorted list of keys
+ #
+ set key_list ""
+ set pair [$dbc get -first]
+
+ while { 1 } {
+ if { [llength $pair] == 0 } {
+ break
+ }
+ set k [lindex [lindex $pair 0] 0]
+ lappend key_list $k
+ set pair [$dbc get -next]
+ }
+
+ # Discard duplicated keys; we now have a key for each
+ # duplicate, not each unique key, and we don't want to get each
+ # duplicate multiple times when we iterate over key_list.
+ set uniq_keys ""
+ foreach key $key_list {
+ if { [info exists existence_list($key)] == 0 } {
+ lappend uniq_keys $key
+ }
+ set existence_list($key) 1
+ }
+ set key_list $uniq_keys
+
+ set key_list [lsort -command _comp $key_list]
+
+ #
+ # Get the data for each key
+ #
+ set i 0
+ foreach key $key_list {
+ set pair [$dbc get -set $key]
+ if { $stripnulls != 0 } {
+ # The Tcl interface to db versions before 3.X added
+ # nulls at the end of all keys and data, so we provide
+ # functionality to strip them out.
+ set key [strip_null $key]
+ }
+ set data_list {}
+ catch { while { [llength $pair] != 0 } {
+ set data [lindex [lindex $pair 0] 1]
+ if { $stripnulls != 0 } {
+ set data [strip_null $data]
+ }
+ lappend data_list [list $data]
+ set pair [$dbc get -nextdup]
+ } }
+ #lsort -command _comp data_list
+ set data_list [lsort -command _comp $data_list]
+ puts -nonewline $f [binary format i [string length $key]]
+ puts -nonewline $f $key
+ puts -nonewline $f [binary format i [llength $data_list]]
+ for { set j 0 } { $j < [llength $data_list] } { incr j } {
+ puts -nonewline $f [binary format i [string length \
+ [concat [lindex $data_list $j]]]]
+ puts -nonewline $f [concat [lindex $data_list $j]]
+ }
+ if { [llength $data_list] == 0 } {
+ puts "WARNING: zero-length data list"
+ }
+ incr i
+ }
+
+ close $f
+ error_check_good upgrade_dump_c_close [$dbc close] 0
+ error_check_good upgrade_dump_db_close [$db close] 0
+}
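+
+# The records upgrade_dump writes are length-prefixed binary: a 4-byte
+# key length, the key bytes, a 4-byte count of data items, then each
+# item as a 4-byte length followed by its bytes (all written with
+# [binary format i]). The reader below is a hypothetical sketch, not
+# used by the test suite, showing how such a dump could be parsed
+# back; the proc name read_upgrade_dump is illustrative only.
+proc read_upgrade_dump { file } {
+ set f [open $file r]
+ fconfigure $f -encoding binary -translation binary
+ set result {}
+ # Each iteration consumes one key and its sorted data items.
+ while { [string length [set hdr [read $f 4]]] == 4 } {
+ binary scan $hdr i klen
+ set key [read $f $klen]
+ binary scan [read $f 4] i ndata
+ set data_list {}
+ for { set j 0 } { $j < $ndata } { incr j } {
+ binary scan [read $f 4] i dlen
+ lappend data_list [read $f $dlen]
+ }
+ lappend result [list $key $data_list]
+ }
+ close $f
+ return $result
+}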
+
+proc _comp { a b } {
+ if { 0 } {
+ # XXX
+ set a [strip_null [concat $a]]
+ set b [strip_null [concat $b]]
+ #return [expr [concat $a] < [concat $b]]
+ } else {
+ set an [string first "\0" $a]
+ set bn [string first "\0" $b]
+
+ if { $an != -1 } {
+ set a [string range $a 0 [expr $an - 1]]
+ }
+ if { $bn != -1 } {
+ set b [string range $b 0 [expr $bn - 1]]
+ }
+ }
+ #puts "$a $b"
+ return [string compare $a $b]
+}
+
+proc strip_null { str } {
+ set len [string length $str]
+ set last [expr $len - 1]
+
+ set termchar [string range $str $last $last]
+ if { [string compare $termchar \0] == 0 } {
+ set ret [string range $str 0 [expr $last - 1]]
+ } else {
+ set ret $str
+ }
+
+ return $ret
+}
+
+proc get_log_vers { env } {
+ set stat [$env log_stat]
+ foreach pair $stat {
+ set msg [lindex $pair 0]
+ set val [lindex $pair 1]
+ if { $msg == "Log file Version" } {
+ return $val
+ }
+ }
+ puts "FAIL: Log file Version not found in log_stat"
+ return 0
+}
+
diff --git a/db-4.8.30/test/wordlist b/db-4.8.30/test/wordlist
new file mode 100644
index 0000000..cb23d0d
--- /dev/null
+++ b/db-4.8.30/test/wordlist
@@ -0,0 +1,10001 @@
+addresser
+cataract
+colonially
+atoned
+avow
+bathroom
+adjutants
+cooperate
+benighted
+apologist's
+affidavits
+baptisms
+bubbling
+classic
+allaying
+component
+battlement
+backtrack
+
+courage
+bore
+advertisement
+attests
+bunny's
+airlifts
+cajole
+cataloging
+airily
+collected
+abridged
+compel
+aftermath
+barrow
+approve
+chillier
+bequest
+attendant
+abjures
+adjudication
+banished
+asymptotes
+borrower
+caustic
+claim
+cohabitation
+corporacies
+buoy
+benchmark's
+averting
+anecdote's
+caress
+annihilate
+cajoles
+anywhere
+apparitions
+coves
+bribed
+casually
+clue's
+asserted
+anaesthesia
+columnated
+bogs
+astral
+barbed
+captives
+acclaims
+architects
+abstained
+attitude
+accumulating
+coalesced
+angelic
+agnostic
+breathed
+bother
+congregating
+amatory
+caging
+countryside
+chapel
+buttonhole
+bartenders
+bridging
+bombardment
+accurately
+confirmed
+alleviated
+acquiring
+bruise
+antelope
+albums
+allusive
+corker
+cavity's
+compliment
+climb
+caterpillar
+almond
+authenticated
+balkan
+assembly's
+acidity
+abases
+bonny
+been
+abbots
+abductor's
+aerials
+cancels
+chalked
+beeps
+affirms
+contrariness
+clearest
+appropriations
+critiquing
+affluence
+bouts
+abiding
+comprises
+brunches
+biology
+conceptualization's
+assaying
+abutter
+adorable
+beatable
+appenders
+aggressors
+agrarian
+bottleneck
+angled
+beholds
+bereaved
+creation
+animated
+candied
+bar
+aeronautics
+cousin's
+cleaver
+alienation
+billet
+bungler
+contention
+businessman
+braids
+assert
+boisterous
+consolidate
+breathing
+ballot
+averted
+conscientiously
+bellow
+brazenness
+coaches
+bulldog
+classify
+checksum
+almond's
+cornered
+caskets
+capacitors
+beefer
+connoisseurs
+consisted
+adore
+circumvented
+colonels
+addenda
+boost
+compatibility's
+bumblebee
+commonest
+containment
+active
+absorption's
+creaks
+administer
+beset
+aborted
+aforesaid
+aridity
+broken
+azimuths
+aerial
+addition's
+aggrieve
+anthology
+circuitous
+checks
+alley's
+beam
+boss
+corrupting
+absolutes
+asteroid's
+bandstands
+beatitude's
+analogue's
+busts
+confession
+bedstead
+affairs
+blackmailers
+collared
+buckboard
+assassin
+accessor
+adjudging
+binders
+constituent's
+blister
+aromas
+approved
+absorbent
+barbarously
+cat's
+builder
+brandish
+assailing
+constitute
+christening
+acutely
+amount
+blurry
+blocks
+advertise
+chain
+brigade's
+confusion
+beds
+arrangers
+colonizers
+beautifying
+bankruptcy
+bedazzles
+candidates
+clearness
+admonishment's
+behind
+abbreviations
+basting
+ballasts
+amateurism
+celled
+constituted
+calibrate
+brambly
+befuddles
+azure
+busiest
+admiringly
+appropriator
+accumulator
+cables
+abhor
+civil
+botulinus
+creaked
+bismuth
+astronomical
+abscissas
+bodice
+aunt
+cascades
+cares
+comradeship
+assemblages
+boater
+bellmen
+admission's
+ambitious
+baldness
+abortive
+controlled
+chinked
+coded
+courtrooms
+arteriolar
+cooler's
+cared
+brewer
+christians
+barbecues
+contacts
+blackjack's
+buzzing
+blasters
+accords
+braziers
+allegretto
+catered
+breveting
+cleaning
+amicably
+bummed
+consulted
+allegro's
+accumulator's
+compartmented
+condemned
+concludes
+bitwise
+cheered
+appropriator's
+accessors
+casting
+carolina's
+accompanying
+budding
+correspond
+bach's
+angel's
+bearing
+arresters
+biweekly
+character
+badgering
+cantankerous
+avalanching
+adjudges
+barometer
+append
+continuations
+burped
+boxtop's
+abstention
+amp
+axiomatized
+bimonthlies
+aghast
+arresting
+breakwater's
+continuing
+bridle
+bobbin's
+antagonistically
+blindly
+biochemical
+biologically
+antifundamentalist
+confer
+cloudiness
+bonded
+comfortingly
+caption
+blackmailed
+bidders
+breakpoint
+brigadier
+criminals
+coyotes
+casserole's
+annex
+cereals
+breadboxes
+belgian
+conductivity
+counterexample
+anarchist
+couches
+atavistic
+clipped
+button
+axiomatic
+capping
+correcting
+chase
+chastise
+angle
+burnished
+beauteously
+antipodes
+crippling
+crowns
+amends
+bah
+brigadiers
+alleged
+correctives
+bristles
+buzzards
+barbs
+bagel
+bonfire
+bugled
+advisee's
+battled
+budded
+burners
+causeway's
+adaptation
+caliber
+browner
+apprehensions
+bonnet
+anachronistically
+composites
+bothered
+assurer
+arc
+chaser
+bastards
+calmed
+bunches
+apocalypse
+countably
+crowned
+contrivance
+boomerang's
+airplane's
+boarded
+consumption
+attuning
+blamed
+cooing
+annihilation
+abused
+absence
+coin
+coronaries
+applicatively
+binomial
+ablates
+banishes
+boating
+companions
+bilking
+captivate
+comment
+claimants
+admonish
+ameliorated
+bankruptcies
+author
+cheat
+chocolates
+botch
+averring
+beneath
+crudely
+creeping
+acolytes
+ass's
+cheese's
+checksum's
+chillers
+bracelet
+archenemy
+assistantship
+baroque
+butterfly
+coolie's
+anecdote
+coring
+cleansing
+accreditation
+ceaselessly
+attitudes
+bag
+belong
+assented
+aped
+constrains
+balalaikas
+consent
+carpeting
+conspiracy
+allude
+contradictory
+adverb's
+constitutive
+arterial
+admirable
+begot
+affectation
+antiquate
+attribution
+competition's
+bovine
+commodores
+alerters
+abatements
+corks
+battlements
+cave
+buoys
+credible
+bowdlerizes
+connector
+amorphously
+boredom
+bashing
+creams
+arthropods
+amalgamated
+ballets
+chafe
+autograph
+age
+aid
+colleague's
+atrocious
+carbonizing
+chutes
+barbecued
+circuits
+bandages
+corporations
+beehive
+bandwagon
+accommodated
+councillor's
+belted
+airdrop
+confrontations
+chieftain's
+canonicalization
+amyl
+abjectness
+choke
+consider
+adjuster
+crossover's
+agreeing
+consolations
+capitalizers
+binges
+annihilating
+callers
+coordinate
+banshees
+biscuits
+absorbency
+corollary
+corresponded
+aristocrat's
+banally
+cruiser
+bathtub's
+abbreviated
+balkiness
+crew
+acidulous
+air
+birdies
+canvassing
+concretion
+collectively
+chasteness
+chapels
+copiousness
+benign
+armies
+competing
+buss
+awakened
+breakpoint's
+conceptualizing
+cleansers
+acorns
+conveyance's
+bluer
+battle
+budges
+characteristically
+be
+contour
+beguiling
+awarding
+armhole
+airship's
+bathtub
+breathable
+crowded
+compiles
+certain
+brutalizing
+bacteria
+baronies
+abode
+blacksmith
+brinkmanship
+capitalizations
+cousin
+botany
+avionic
+companion
+consists
+connoisseur's
+avalanched
+claimant's
+backstitches
+affixes
+bikes
+atomically
+cowed
+asleep
+becomingly
+acorn's
+complainers
+appreciated
+cross
+cringed
+booting
+attitudinal
+broadcasting
+childishly
+breeze's
+craven
+boll
+clause's
+burden
+appendages
+atemporal
+allah
+carnival's
+anchorage
+adjures
+besought
+abounding
+crucifying
+arrangements
+antiquarians
+burrows
+antipode
+canvas
+constable's
+coopers
+ascended
+companionship
+bakery's
+bayonets
+conclusively
+boasters
+beneficiaries
+conspicuous
+contriver
+architecture
+breakthroughs
+brownie's
+blur
+academics
+antagonist
+contemplates
+arena
+caravan's
+administers
+comprehensively
+convey
+bigot
+blitz
+bibliography's
+coerced
+assail
+amazons
+banned
+alabaster
+concluding
+bouquet
+barks
+acquaintances
+astonishment
+constraint
+backpack's
+breakthroughes
+blocking
+accomplishers
+catastrophe
+bushels
+algae
+ailment's
+anemometers
+beginning's
+chefs
+converse
+cornerstone
+astound
+assuring
+adornment
+anyone
+alumni
+club
+bestselling
+businessmen
+constructed
+attendee's
+cooped
+ablute
+chronicler
+alaska
+clam
+canonicals
+concerned
+aligned
+creek
+burrow
+allay
+admirals
+blackens
+compressing
+confirm
+cows
+battleship's
+belched
+affixing
+chalices
+choirs
+absentee's
+baseboard's
+apportionment
+adheres
+accounts
+chef
+access
+clearings
+accompanists
+concentrating
+ado
+bathos
+bailiff
+continuance
+ball
+bearer
+congress
+cites
+can't
+balloon
+crams
+consults
+bungled
+bike's
+apes
+assassinations
+colt's
+consecrate
+ancients
+chick
+analyst
+adsorbing
+burntly
+accompanist's
+apprehensive
+bengal
+boughs
+ankles
+anchored
+benefits
+accommodation
+amiss
+brink
+chewers
+blueberry's
+chairs
+adjoin
+bivalve
+autobiography's
+automated
+comparisons
+climbed
+artists
+congruent
+cold
+atonement
+cashier
+armageddon
+allocations
+bereavements
+bumblebees
+blew
+busboys
+bottoming
+alternations
+apprenticed
+bestial
+cinder's
+consumption's
+abbey's
+amended
+continued
+birefringent
+barbados
+ability's
+compulsory
+antler
+centerpieces
+accountant's
+arrogant
+ballads
+ascenders
+appliers
+adjustment's
+blabbed
+baits
+activity's
+clod's
+adjudicating
+bleak
+commutes
+bumming
+beating
+cohesiveness
+branded
+acknowledger
+communications
+blockhouses
+booklets
+consenters
+creek's
+consulting
+binary
+coaster
+ascription
+bushwhack
+boggles
+affidavit's
+arrangement's
+congressionally
+convenient
+avoider
+abaft
+bootlegger's
+befriending
+ceases
+carbonizes
+clumps
+commented
+competence
+conversing
+butting
+astonishing
+armful
+allegory's
+crisis
+critiques
+concurred
+conservative
+aristotelian
+blizzard's
+corner
+amateur's
+compare
+affiliations
+bestseller
+batch
+cleanly
+assayed
+bravos
+bowls
+conceptualized
+babe's
+algorithm's
+baptist
+cheeks
+conquerer
+bidder's
+behaving
+briefcase's
+analogues
+amply
+attitude's
+apple
+crossable
+ambushed
+besmirches
+creditors
+bandwagons
+continentally
+adjuncts
+concerns
+agers
+cop
+amoebas
+bisected
+bombing
+appendices
+cocking
+bused
+babied
+blackjacks
+controller's
+aquarius
+charm
+clip
+awarder
+consistently
+calibrated
+bushwhacking
+avaricious
+ceaselessness
+basically
+accolades
+adduction
+commending
+consulates
+certifiable
+admire
+appropriateness
+bandlimits
+chill
+adds
+constable
+chirping
+cologne
+cowardice
+baklava
+amusedly
+blackberry
+crises
+bedeviling
+botching
+backbend
+attaining
+continuity
+artistry
+beginner
+cleaner's
+adores
+commemorating
+amusement
+burial
+bungalow's
+abstinence
+contractually
+advancement's
+conjecture
+buckling
+conferrer
+bankers
+cherub's
+belonged
+classifications
+baseball
+carbonation
+craved
+bans
+aphid
+arbor
+ague
+acropolis
+applied
+aspired
+calibrating
+abundance
+appeased
+chanted
+ascent
+convenes
+beep
+bottles
+aborigines
+clips
+acquainting
+aiming
+creditor's
+abolitionists
+cloves
+containments
+bungling
+bunt
+anchors
+brazed
+communicator's
+brew
+accumulate
+addicting
+actively
+befog
+anachronisms
+bumblers
+closest
+calculators
+absurdity
+colleagues
+college
+assesses
+conflicted
+associational
+betide
+conceptualization
+adjutant
+alliances
+corresponding
+barometers
+cot
+brooch's
+coiled
+arboreal
+convicted
+artless
+certificates
+bourbon
+astonish
+bust
+correlate
+amounts
+anal
+abstraction's
+corns
+conqueror's
+boldly
+bob's
+beer
+blanks
+corpses
+contingent
+blackly
+backed
+appearances
+cancers
+actuating
+apprehension's
+colorings
+anglicanism
+armament
+armer
+bizarre
+begotten
+actions
+archly
+capriciously
+clue
+contractor
+contributions
+agendas
+coached
+blamable
+annoyers
+coupons
+brooked
+assortment
+axes
+celebrates
+courageously
+baroqueness
+blasphemous
+asserter
+contents
+correctly
+challenged
+bulldoze
+casement
+acknowledge
+bitterness
+belongs
+allotments
+chalice's
+bequest's
+adjacent
+consumer's
+conservatively
+coalition
+background's
+backache
+befouls
+brushfire's
+analysts
+branch
+airways
+awaiting
+breakfast
+anoints
+baying
+contrary
+bilge
+chasm's
+babes
+afresh
+centerpiece's
+barked
+coffin
+assumed
+actresses
+accentuating
+aching
+abet
+balancers
+consumptively
+cagers
+backing
+angiography
+chord's
+cheapened
+bewailed
+arson
+begged
+convergent
+bowlers
+conflicting
+confiscated
+bitch
+bloody
+brushfires
+bleach
+computation's
+choppers
+circuitously
+chancing
+bunker
+concept's
+alacrity
+boyhood
+ammo
+bobwhites
+carter
+ardent
+bier
+airway's
+brownies
+aura
+cannibalizing
+confirms
+australian
+barrage
+closures
+assertive
+abstainer
+bicarbonate
+clone
+back
+cipher
+crown
+cannibalizes
+away
+crafty
+airings
+amtrak
+comical
+burnish
+continuum
+apparition
+apologizing
+blot
+blacker
+characters
+built
+apparent
+applicative
+assiduous
+attorneys
+affectionately
+bobbing
+baggy
+comic's
+attempt
+appealers
+amortize
+bonanza
+backwards
+bowers
+anemometer
+ambulance's
+creeps
+abduction's
+coal
+chiller
+adjudications
+clogging
+ascending
+bookkeeper
+crawlers
+battery's
+artifacts
+attributions
+amusements
+aftermost
+allophones
+bemoaned
+comptroller
+bugger's
+buoyancy
+booboo
+award
+amplifying
+certify
+bivariate
+attunes
+asteroidal
+chant
+compounds
+asserts
+believably
+alert
+apostate
+catalysts
+aureomycin
+convex
+beetle's
+banishing
+agitating
+bystanders
+bow
+connotes
+blanch
+charmingly
+animal's
+baritones
+brier
+astronomer
+company's
+balding
+actually
+aunt's
+avalanches
+acquisition
+base
+compilations
+bathtubs
+actualization
+chanced
+atom
+banged
+befuddled
+apologized
+componentwise
+britisher
+began
+conservationist
+actuate
+crosser
+appended
+bitten
+ambivalence
+acetate
+conversions
+buzzwords
+askance
+abolishing
+birdied
+creeds
+anglers
+colossal
+bereft
+chock
+apprentice
+cooper
+besmirching
+allocating
+antiques
+bikini's
+bonders
+afflictive
+augmentation
+atheist
+bucket
+bibliophile
+annexes
+beguiles
+birdbaths
+amendments
+animators
+asymptotically
+communally
+barber
+biographers
+arguable
+confidant
+apologies
+adorns
+contacting
+coarsest
+artichokes
+arraign
+absorbing
+alden
+commercially
+cabbage's
+coincides
+clumping
+cents
+alleviater
+buzzard
+braked
+anesthetized
+bugling
+capitalist
+befriended
+appreciatively
+boomtown's
+cozier
+critic's
+correspondent
+bard
+attenuator
+bake
+brings
+chews
+anechoic
+brutal
+colder
+buckshot
+canvassers
+analytic
+allies
+alloys
+awake
+alienates
+bin's
+crimes
+constructible
+classifiers
+bulb
+cream
+banquet
+axiomatize
+adjourn
+converted
+auditioned
+comfortably
+bandwidth
+cannibalize
+ascensions
+bussing
+balloons
+contenders
+commemoration
+aspersions
+consultation
+cashes
+belting
+augurs
+architectural
+bluebird's
+breastworks
+absconded
+bullets
+bloodstain's
+blunder
+astronautics
+coo
+approves
+authority
+assure
+amsterdam
+acquitted
+adversity
+celebrate
+bred
+bridged
+bloc's
+bullied
+affinity
+breezes
+baptistry's
+constitutions
+avouch
+amazingly
+consolation
+abnormality
+clashes
+buttes
+buzzard's
+breathers
+chipmunk
+contented
+carol's
+armers
+amazedly
+comprehends
+canonicalize
+breakthrough
+arbitrator
+butterfat
+cases
+besiegers
+affianced
+amelia
+bush
+airplane
+annulled
+bike
+alternated
+attackers
+crude
+carelessness
+akin
+combated
+assisting
+clocker
+attacked
+briefed
+antic's
+attendants
+attracting
+cope
+allotting
+bandwidths
+add
+assaulting
+breakage
+climes
+arrival's
+burp
+accelerator
+capacitance
+arabians
+bankruptcy's
+archeological
+coins
+browbeating
+convene
+aficionado
+anachronism's
+chasm
+cardinalities
+compartmentalize
+courter
+assess
+abreaction
+brakes
+compatibly
+compression
+characterizable
+briefing's
+alto's
+classifiable
+contrast
+correlation
+colonial
+applying
+authorizers
+contesters
+basely
+cherries
+clicking
+cornfield's
+alarmingly
+conferences
+business's
+banker
+bloomed
+airfield
+attracts
+building
+commutative
+atomization
+competitions
+boatsmen
+acquirable
+arkansas
+command
+beings
+compactors
+anodize
+arguments
+conforming
+adsorption
+accustomed
+blends
+bowstring's
+blackout
+appender
+buggy
+bricklaying
+chart
+calmer
+cage
+attractive
+causation's
+athenian
+advise
+cranks
+containers
+besotter
+beret
+attender
+cone
+bills
+aligns
+brushlike
+brownest
+bosom's
+berth
+accountably
+bequeathed
+affirmatively
+boundless
+alleyways
+commute
+bendable
+abhors
+calculation
+affidavit
+answerable
+bellicose
+counterfeiting
+admiral's
+chisel
+bridesmaids
+believers
+aggregated
+conspicuously
+abased
+armenian
+conspirator
+canonical
+assignable
+barrage's
+clearance's
+casts
+administratively
+befoul
+chaffer
+amazer
+colorer
+broaching
+crevice
+aniline
+coursing
+compassionate
+adhesive
+bibliographies
+corrects
+augments
+between
+causer
+amorist
+cellist's
+acoustical
+baseless
+cigarettes
+astuteness
+appropriators
+convincing
+bellhop's
+bemoaning
+calmingly
+chronologically
+castles
+algebraically
+appointees
+academic
+blunderings
+assassins
+barrel
+accuracy
+amortized
+ballpark
+acrobat's
+brazier's
+abortively
+coarser
+airfields
+contester
+circus's
+creased
+amorphous
+accomplisher
+blabs
+butchers
+crackles
+bachelor
+aviators
+chariot's
+circumflex
+binocular
+alienating
+artificially
+agreement's
+aglow
+afghan
+abrupt
+annihilates
+apologetic
+barge
+betters
+algorithms
+conjurer
+chargeable
+brindle
+alphabetizes
+coder
+availing
+bandpass
+arrogance
+convent's
+advertiser
+connected
+basso
+breakfaster
+comic
+congenial
+beau
+courters
+adapters
+abruptly
+chemicals
+bringed
+creaming
+butterer
+attained
+actuals
+averred
+brainwash
+centerpiece
+blabbermouth
+byproduct's
+adaptable
+automata
+art
+cheery
+beheld
+beehive's
+claimed
+crucial
+brokenness
+agility
+combating
+cleft
+amenity
+after
+configuration
+contrasting
+coarsely
+brass
+barnstormed
+bowel
+bridesmaid's
+cornfield
+crazing
+autocracies
+adult
+conceptualizations
+corroboration
+bedders
+arroyo
+alarmist
+boatman
+chests
+burglary
+budgets
+canary's
+arraigning
+chin
+barnstorms
+blamers
+brimful
+calculate
+cellular
+contended
+challenges
+brusque
+bikinis
+arithmetics
+chairpersons
+class
+aircraft
+capably
+centralize
+awhile
+compacting
+courteous
+archaeologist's
+cram
+adagio
+affronts
+amplitude's
+bureau's
+audaciously
+autism
+blueberries
+an
+chips
+confiner
+chopper's
+chronology
+breaching
+bead
+amass
+camouflage
+compensation
+aspect
+broker
+atrophy
+balk
+bloodless
+barnyard
+benefactor's
+airdrops
+caused
+anthem
+activist's
+bottomless
+arrogates
+avoided
+bouncy
+clarified
+articulate
+almoner
+communists
+blokes
+butternut
+clockings
+barium
+blows
+criticism's
+associations
+brute
+bleeds
+alliteration's
+bluestocking
+boxwood
+clearer
+allegiance
+conceptualizes
+captivating
+bolshevik's
+belabored
+biographic
+contaminates
+chanticleer's
+adjusted
+childhood
+arguing
+cape
+conversantly
+compensating
+collaborations
+arraignment's
+blasted
+charging
+aggregation
+apprentices
+bird
+codifiers
+ballistic
+breve
+bells
+carolina
+chalk
+buckles
+boyfriend's
+adorn
+accoutrements
+availability
+antisymmetry
+blades
+alluded
+asterisks
+bookcases
+additive
+consents
+advanced
+balalaika
+coders
+caliph
+alundum
+are
+controllable
+blazing
+clattered
+asiatic
+axiomatizes
+ace
+coining
+column
+auditor's
+carol
+concatenated
+arrayed
+capital
+cautioner
+clan
+beauteous
+abbreviate
+asteroids
+canal's
+consolidation
+closets
+concealer
+crevices
+abed
+complex
+conviction's
+abide
+arrests
+begrudges
+adolescent
+conceals
+cells
+circles
+bravest
+compromiser
+bagels
+areas
+afore
+allergies
+arrangement
+attraction's
+amulets
+abstraction
+captured
+crouched
+brothers
+cash
+achieving
+bastard
+compete
+boiling
+beaching
+amphetamines
+clerking
+congestion
+alleviates
+angry
+bared
+comprehended
+bloodstain
+constituency's
+automating
+aerial's
+counterfeit
+besotted
+basses
+biofeedback
+compilation's
+band
+consulate
+appellant
+cough
+antennae
+contend
+anniversary
+boor
+artifactually
+aerobics
+booths
+chubbiest
+consumable
+assignments
+bromide's
+confined
+breakers
+alongside
+courtier
+boisterously
+bilaterally
+alternation
+auspiciously
+arbitrated
+condemning
+burns
+correspondents
+composition
+cavalierly
+coverlets
+capacities
+clatter
+apotheoses
+cartography
+ceased
+capitalized
+auditor
+appendicitis
+chops
+barony
+anemometry
+befouled
+briefer
+chest
+begetting
+bloats
+bookseller's
+commitment
+confides
+carcass's
+battering
+altruistically
+ballots
+adornments
+broaden
+angularly
+coefficient
+cataloged
+brae
+advantage
+anthems
+calculated
+counseling
+agitate
+accentuated
+camel
+ambivalent
+bedposts
+beacons
+chubbier
+cheerer
+assumes
+concord
+autumns
+convention's
+alpha
+adulterates
+arbiters
+archaically
+criteria
+achilles
+cheaper
+bulling
+associators
+bloater
+brawler
+ability
+adherents
+commonwealth
+coyote's
+centrally
+bequeathing
+abandonment
+circumstantially
+courteously
+borrow
+countermeasure's
+capricious
+allied
+anagram's
+absorptive
+assuage
+asset
+booked
+aspects
+commits
+crates
+capacitive
+condones
+assimilates
+carriage
+competitor's
+cocoons
+aggravated
+caravans
+arbitrator's
+baked
+balanced
+annihilated
+addressable
+autonomous
+bandwagon's
+contesting
+burrowing
+coroutines
+abjection
+correctable
+applauded
+bragged
+code
+aggressiveness
+cluttered
+attacking
+chide
+am
+coasters
+blizzard
+contentment
+altruism
+certifier
+capturing
+combinators
+carefree
+activate
+blindfolding
+assassinating
+approximate
+biplane's
+aplenty
+arteriosclerosis
+concentrates
+antisymmetric
+assurances
+anarchist's
+ascend
+advancing
+atrocities
+butt's
+bearable
+craftiness
+categorized
+barn
+contributor's
+arises
+bushy
+bisque
+coasted
+bargaining
+area's
+couples
+cabs
+barter
+bulletin
+chisels
+broadcasters
+contingency
+bywords
+antimicrobial
+coexisted
+blinding
+arithmetize
+coweringly
+convince
+competed
+bauble's
+crab
+boggling
+advocacy
+atlas
+assembled
+ancient
+bloodstream
+balking
+bin
+bully
+affirm
+cruelest
+atone
+conserved
+confession's
+bat
+captive
+aster
+blames
+colonel's
+bones
+borderline
+cleanses
+classified
+crudest
+contiguity
+bailing
+ablaze
+bender
+attendee
+clobbers
+aliasing
+boats
+brand
+church
+bandy
+adhering
+barred
+ammunition
+chime
+accompaniment's
+battleground's
+composing
+caveats
+armor
+amoeba
+composure
+collides
+avowed
+banding
+counsels
+asymmetric
+abbreviates
+balky
+adjudicates
+anointing
+accursed
+copse
+action
+construction's
+accents
+ambition's
+caressing
+autopilot
+coolers
+cache
+allayed
+barnyards
+britons
+appointment
+adaptor
+blockers
+abridges
+bloodiest
+betrothal
+bombards
+bony
+bus
+canary
+antinomy
+awash
+comrades
+ablating
+collectible
+cosmetic
+accession
+clutters
+censures
+allusions
+belittled
+armchair
+abode's
+conception's
+ascribe
+aliases
+ancestry
+ax
+companionable
+aright
+boxed
+brighteners
+alloy's
+checkable
+arraignments
+bed
+bunkhouses
+abbeys
+ceasing
+companies
+cherishing
+chunk's
+barony's
+chinning
+burdens
+briskness
+beggarly
+beloved
+clambered
+constitutionality
+beguiled
+archers
+alleyway
+apostle's
+consulate's
+antiformant
+categories
+construct
+aliments
+acquired
+blotted
+alterations
+adolescent's
+cranes
+bluntest
+accusation
+chafer
+airstrips
+abolished
+bothersome
+churchly
+airy
+bedded
+awareness
+alliterative
+arose
+amputates
+civilization's
+arenas
+certifying
+aspirators
+carbon's
+bunching
+aerates
+bilked
+checking
+cloned
+administrations
+canvasses
+colorless
+chamber
+circumspectly
+benedictine
+advisedly
+classifier
+approachable
+banners
+concurrently
+chores
+agape
+convention
+bindings
+budget
+comedies
+ants
+ambassadors
+chroniclers
+carrots
+colorful
+bulkhead's
+coherence
+buyer
+aggressions
+congressional
+commoners
+cheapen
+concealed
+columnates
+anarchy
+actress's
+baseboards
+creature's
+centuries
+barbarian
+concrete
+bicycles
+acceptably
+acclimating
+biceps
+bloodhound's
+becalmed
+apostle
+bible
+conjunctive
+comb
+ballers
+bickering
+adulterous
+austrian
+applicable
+blackberries
+creasing
+catalogs
+avert
+asparagus
+cambridge
+bird's
+belgians
+admonished
+admirations
+conscientious
+crescent's
+connectives
+blissful
+commenting
+bagged
+assimilate
+abounded
+copyright's
+advancement
+axiom's
+compilation
+circumlocution's
+catheter
+chances
+concretely
+codification
+browned
+clustering
+bum's
+clauses
+boundlessness
+arteriole's
+alfresco
+begrudged
+blustered
+anglican
+adjoined
+bamboo
+bathed
+consortium
+carrot's
+cloak
+album
+bunglers
+approbate
+colored
+aim
+cowboy
+alienate
+cleverest
+ambiguous
+confrontation's
+clear
+africa
+bowline's
+astronauts
+belayed
+censorship
+animation
+bedrooms
+chasms
+compared
+cogitated
+barbarians
+accomplices
+columnizes
+beaming
+busied
+counterpointing
+aluminum
+coconut's
+acclamation
+chokers
+biomedicine
+basalt
+buckwheat
+cardinality's
+bafflers
+arid
+chap's
+abound
+biblical
+backbone
+anticipation
+condemner
+angular
+advisability
+believing
+boiler
+arclike
+abetter
+bespeaks
+axiomatically
+coarse
+auditions
+bludgeoning
+clam's
+chief
+arrow
+cementing
+anxiety
+aberrations
+brushes
+cherub
+corollary's
+bunters
+beefers
+barbiturate
+circumlocution
+conjoined
+charities
+coverage
+campaigner
+burrowed
+barracks
+bristling
+accomplice
+abandoned
+bull
+caked
+century's
+bantu
+bristled
+airer
+bench
+bevy
+chamberlain's
+attention
+cloning
+camouflaging
+alder
+counter
+credibly
+approvingly
+breakup
+artillery
+celestially
+bail
+baker
+bullish
+canvass
+conversationally
+bringers
+augment
+creditably
+butterers
+botswana
+contemptible
+bribing
+adumbrate
+barb
+calico
+alludes
+amplified
+chills
+cloak's
+aver
+arthropod's
+budgeter
+bereavement
+cellars
+crewing
+blackmailer
+ayes
+bedsteads
+breachers
+bazaar
+centered
+celebrity
+blameless
+abscissa
+aerators
+awaited
+british
+adversary
+cowslip
+buttons
+confusing
+buggy's
+belts
+canceled
+addresses
+bribes
+condoning
+bonneted
+coarsen
+amazement
+angels
+chemise
+carbonates
+apostolic
+bandit's
+contending
+consummate
+counterclockwise
+beneficence
+benefitted
+contradicts
+comfortabilities
+anemone
+conductive
+articles
+bookcase
+burst
+baptizes
+countless
+costs
+agonizes
+byte
+creeper
+begs
+bunnies
+attract
+able
+calories
+baskets
+american
+brunt
+cognition
+closing
+chef's
+backbone's
+complicates
+cloister
+bedsprings
+arrays
+brigs
+archbishop
+buckler
+clove
+catholic's
+bellboys
+chairmen
+clap
+clarifications
+ambuscade
+bight
+bellyfull
+allowance's
+academy's
+construe
+cancer
+bay
+aristocratic
+alleviaters
+binoculars
+axiomatizing
+changer
+bustle
+civic
+ariser
+axiomatization
+aggravates
+confiscation
+bowdlerize
+backspaced
+alters
+clarity
+blots
+bland
+belligerent's
+burgher
+cardinally
+bookcase's
+buggers
+byte's
+avarice
+bostonians
+crops
+authorizations
+cogitation
+baptize
+caressed
+abase
+crowding
+beriberi
+allegories
+coronets
+cell
+calculative
+adduce
+amperes
+bladders
+adages
+contests
+cognizant
+actuates
+ambiguity
+brighten
+concert
+conviction
+booty
+ashtray
+braves
+blouses
+avoiders
+confederate
+bombings
+couplings
+convictions
+attractiveness
+chronicled
+corers
+anger
+covertly
+aural
+asynchrony
+arrowheads
+breakdown's
+bulletins
+acquiescence
+ambush
+catches
+at
+billion
+contact
+bees
+adopters
+approximately
+chiseled
+attributively
+criers
+codification's
+cowslips
+contradictions
+buttock's
+categorically
+counterpart's
+confessor
+appreciably
+adjusts
+altitude
+ceremonialness
+clipper
+bracelets
+anthropomorphically
+benedict
+connecting
+bacterium
+achievers
+abutter's
+autocorrelate
+coupling
+blanketer
+continental
+assignment
+conundrum
+arab
+besides
+cheerful
+blowup
+bastion
+arrive
+combines
+agar
+cookie
+astronaut's
+constraint's
+article's
+confiscations
+bounded
+adjudicate
+belligerently
+boron
+brownness
+adept
+creep
+abduction
+accosting
+asylum
+autographed
+clash
+chiseler
+clumsily
+capitally
+braking
+absenting
+bagatelle's
+comet
+basked
+anything
+buffeted
+absentia
+bounty
+carols
+characteristic's
+constructive
+comforting
+aflame
+brainwashed
+booby
+aspirations
+adjudge
+behaviorism
+computability
+assessment
+consultations
+bowstring
+acknowledgment
+arranger
+chancellor
+attest
+compresses
+concessions
+asymmetrically
+administering
+clamoring
+arraigned
+archived
+admonition
+actor's
+aimers
+colorers
+booklet
+calibers
+affix
+bushel's
+atomizes
+creeks
+bleedings
+casuals
+archives
+certainly
+animate
+cons
+affiliate
+answered
+coyote
+coughed
+alligator's
+antagonized
+arousal
+assisted
+aerated
+competently
+conquering
+acclaimed
+assign
+announcer
+controllers
+amalgamation
+comfort
+antihistorical
+availed
+balsa
+annoyed
+basted
+asymptomatically
+cropped
+combinational
+barging
+conversant
+causality
+botches
+bedspread
+considerately
+bookstores
+climate
+blessing
+accordion's
+cdr
+bonanza's
+construing
+bearings
+bluster
+backspaces
+babyish
+countermeasure
+crime
+battered
+audit
+associating
+corps
+application
+archangel's
+aided
+breasted
+compelled
+acrobats
+breakfasts
+chronologies
+beet's
+averts
+convergence
+attributable
+adverbial
+churns
+arrest
+breastwork
+beefs
+brownie
+create
+contradistinctions
+coordinators
+abandoning
+byline
+beatitude
+autosuggestibility
+bipartite
+annals
+assents
+conceives
+amalgams
+cleft's
+clicked
+appointers
+bible's
+boots
+caret
+attaches
+controversy's
+combinatorial
+bazaars
+cardinals
+bored
+catering
+christian's
+ashman
+consequence's
+austere
+clay
+birthday's
+amongst
+arbitrariness
+brainstorms
+chateaus
+coaxer
+applause
+cautiousness
+adorned
+compromises
+creatures
+compliance
+apartheid
+archiving
+amoeba's
+communal
+comedian's
+aggressive
+crop
+ante
+better
+chalice
+aristocrats
+circling
+belittle
+abortion's
+coldly
+certification
+befriends
+courthouse
+anesthesia
+accorder
+athletic
+blithe
+bedder
+abasements
+councils
+beware
+abductor
+assonant
+clench
+aspersion
+abortion
+abating
+birches
+breakpoints
+acyclic
+ablate
+canners
+cistern
+boxtop
+composite
+cloudless
+computation
+chastely
+abusing
+bunker's
+compounding
+alveolar
+chaplains
+bias
+audiological
+capability's
+bangle
+barren
+antidote's
+cranking
+baptizing
+bond
+borders
+automobile's
+allegoric
+chargers
+baltic
+autumn
+columns
+absolute
+connoisseur
+cranberry
+contiguous
+consoled
+confirmations
+argot
+blouse
+annotated
+callous
+astounded
+crashed
+autonavigators
+chivalry
+columnating
+beefed
+convincer
+allegorical
+bagger
+assume
+containable
+artistically
+calibration
+architectonic
+campaigns
+addressability
+crazier
+buy
+brightener
+bastion's
+blurb
+awaits
+commands
+chocolate
+bleaching
+antenna
+blowers
+chorused
+composers
+assigners
+aspires
+coils
+bid
+application's
+clamped
+bedding
+awkwardly
+coppers
+costumes
+borax
+caged
+candler
+badges
+clutches
+consign
+apprised
+buys
+adiabatically
+aggregately
+canned
+abstract
+acrimony
+coax
+analytically
+absurd
+alluring
+contradicted
+aspersion's
+bribe
+boos
+chattererz
+backache's
+complying
+continent
+cohabitate
+causation
+astronomer's
+cities
+bookie
+bleating
+cracking
+bicameral
+convoluted
+adjustable
+ambulance
+can
+boulders
+consideration
+announces
+briars
+antipode's
+bartered
+ancestor
+biplanes
+characterize
+crested
+bum
+bridling
+consolable
+bungles
+coffee
+buffets
+congratulation
+commitment's
+adequately
+clown
+capacitor's
+broomsticks
+agglutinate
+activations
+asians
+canon's
+authenticity
+complexities
+cripple
+bracket
+counselor's
+beatably
+bounced
+baton's
+crankiest
+barbell's
+caster
+casseroles
+ballad's
+bob
+batched
+attenuated
+beakers
+biologist
+bleary
+condescend
+blondes
+augustness
+boldface
+battlefronts
+acumen
+bolting
+articulatory
+butyrate
+bowel's
+backwater's
+colonel
+creating
+authorized
+bijection
+accruing
+admirably
+correctness
+citadels
+clasps
+bandlimit
+bib
+appalachia
+contrives
+bundle
+audiology
+circumventing
+blinker
+choked
+bilks
+clears
+affirmations
+arbitrating
+bites
+bootstraps
+capitals
+commuters
+billeted
+authentication
+choice
+attentively
+aggressor
+arterioles
+crowds
+chestnut
+backstitched
+attachments
+assimilating
+bewilderment
+atrophied
+chintz
+blackjack
+armadillos
+bonfire's
+ballast
+agonies
+busier
+coefficient's
+adventurous
+ballet's
+coil
+chewed
+come
+bonder
+catalogue
+coursed
+arise
+biennium
+ceremony's
+blanching
+appraisers
+acolyte
+argues
+beholden
+appanage
+astatine
+banana's
+coons
+civilians
+bodyguard
+archipelago
+bug's
+candles
+antique's
+accidently
+blighted
+belgium
+besieged
+burned
+abuse
+asian
+chute
+awkwardness
+abasing
+bottler
+ardently
+blab
+breakwater
+cavity
+cheated
+befall
+according
+chronicle
+airframes
+bats
+choring
+authorize
+consumed
+chatter
+annunciated
+capers
+anomalous
+clustered
+burner
+acquaintance's
+badger's
+basic
+affectations
+buzzy
+coast
+attendances
+activating
+beams
+cohesive
+attainable
+barbecueing
+beautiful
+acronyms
+communion
+client
+atypical
+antagonists
+conservations
+arguers
+agglomerate
+antigen
+battalion
+ambition
+countered
+assistant
+classed
+arming
+alveoli
+buff's
+backplanes
+busted
+bermuda
+converting
+brutish
+boot
+acidities
+confrontation
+chapel's
+berlin
+ascender
+behead
+buddy's
+commandment
+actuated
+brilliancy
+chance
+bedrock's
+bridgeheads
+arable
+avid
+arteries
+caresser
+ballyhoo
+attested
+african
+comradely
+consciences
+commencing
+antennas
+annulments
+bobolink's
+advisee
+acceptance
+crack
+ascendent
+appendage's
+accommodates
+accumulated
+clones
+apocryphal
+ages
+cluster
+capitols
+camper
+beading
+amble
+buffeting
+circumspect
+advances
+analyzes
+courier's
+aperiodic
+appealer
+atonally
+attentive
+conspire
+appropriating
+armed
+allergic
+agglomeration
+consternation
+blinks
+audibly
+aspirins
+bunions
+adverbs
+armload
+bet's
+caring
+carryover
+coordinator's
+afterthoughts
+allays
+abided
+brownish
+baiting
+capitalism
+coined
+conspirators
+automatic
+contradistinction
+conductor's
+backstitching
+conjure
+casings
+accountant
+clinched
+constrain
+alcohol
+bee
+anticompetitive
+britain
+bade
+camera's
+antimony
+activated
+burglarizes
+compatible
+cotyledon's
+artificiality
+bath
+citadel
+archivist
+chandelier
+addiction
+ampersand
+bitterer
+constructively
+afield
+bing
+attractor's
+cringe
+allergy's
+bigots
+assimilation
+ate
+capitalization
+abridge
+buzzword
+befit
+bandlimited
+commandant
+alabama
+acculturated
+brightening
+bulldozing
+cooky
+bunks
+centers
+bespectacled
+adherent's
+abducts
+another's
+condensation
+billeting
+bye
+chess
+craziest
+ballgown's
+archaism
+consorted
+chinned
+cowl
+beat
+bootlegger
+bravado
+classically
+bulging
+browbeat
+accommodate
+borne
+bronzed
+artifice
+arcade
+become
+backlog
+addressers
+amphitheaters
+befogging
+crochet
+aiding
+celebrated
+conversational
+backbends
+authentications
+advertisement's
+blockade
+bulldozes
+contraction's
+bricklayer's
+brain
+conveying
+anemia
+chronology's
+channeling
+caution
+commanding
+crosses
+artisan
+conditions
+admired
+authenticator
+airships
+blunter
+bridesmaid
+counseled
+cheeriness
+chiefs
+boils
+clerical
+atrocity's
+balls
+ambled
+canvases
+consoles
+abscessed
+abetting
+blitzkrieg
+bottlers
+beveled
+condemn
+alumna
+cords
+admittance
+annotates
+citing
+corrector
+appreciative
+branching
+betrays
+buttoned
+ailment
+boulevards
+bottlenecks
+chamberlains
+bedbug
+covenant's
+crispness
+considering
+broadcasts
+audubon
+arousing
+correction
+barrack
+closure
+contrastingly
+brittleness
+assassin's
+bursa
+bungalows
+balked
+conceptual
+carcasses
+arabia
+blueprint's
+affectingly
+consorting
+buses
+auger
+appointed
+brute's
+bosoms
+anyway
+arrowed
+anaphorically
+clarify
+approachability
+assistance
+buzzes
+commonplace
+bluebonnet's
+adroitness
+availers
+aquifers
+architecture's
+action's
+backgrounds
+abduct
+attired
+briber
+admissibility
+cease
+beck
+auctioneers
+birdbath's
+atomic
+crossing
+considerate
+biconvex
+bulge
+bedridden
+arising
+aggression's
+cherish
+bureaucratic
+abater
+amputating
+atop
+climber
+clutched
+afford
+bisections
+bonnets
+commendations
+bloke
+abundant
+clamp
+aloes
+aboard
+atheistic
+advantageously
+buffs
+chimney's
+cheerily
+benefactor
+ample
+bushwhacked
+captain
+buckskins
+contextually
+antiquarian's
+browns
+bubble
+ban's
+brine
+acculturates
+anhydrously
+beaver's
+advantaged
+bibliographic
+clasping
+clattering
+coerce
+colorado
+airmen
+bandlimiting
+balks
+boners
+attached
+chosen
+convened
+bordello
+composer
+botanist
+backtracks
+civilization
+commutativity
+bloodshed
+cohere
+bunkhouse
+archdiocese
+boycotted
+crosswords
+bedspread's
+anteaters
+cove
+apothecary
+chute's
+addressee
+climatically
+blower
+bane
+cask's
+beetling
+ambiguities
+before
+abstain
+arachnids
+bucket's
+amateurs
+blackouts
+adverb
+butchery
+conjunction's
+barricade
+audiologists
+aphorism
+complete
+butts
+bishops
+allotment's
+confusingly
+channeller's
+blanches
+bragging
+bathe
+comedians
+celestial
+citizens
+couple
+backpack
+aphasic
+brothels
+axles
+cancellations
+bonus's
+consolidates
+authoritative
+axle's
+acclimatization
+carolinas
+chime's
+antibiotic
+bisons
+biographically
+achieve
+bleachers
+bicentennial
+behavioral
+accomplish
+concealment
+biddies
+antitoxins
+arriving
+apprehend
+affluent
+cliffs
+bleached
+astronomers
+connection
+bride
+backs
+bog's
+casket's
+continual
+ampere
+cat
+alternator
+cotton
+athletes
+communicant's
+best
+befuddling
+benefactors
+appease
+annoyingly
+context
+astonished
+cracked
+amnesty
+autumn's
+binder
+babying
+contributory
+assumption
+cowls
+cocks
+airless
+consummated
+atypically
+beneficially
+chairing
+accusative
+commanded
+bufferrer's
+alerter
+arbiter
+civilly
+charms
+backscattering
+cheater
+bushes
+caverns
+chieftain
+calf
+comparing
+aurora
+butyl
+cower
+bemoans
+baptistry
+carpenter's
+capes
+bordered
+arrows
+blocker
+crest
+appeal
+arabic
+conventions
+axis
+brains
+bookkeeper's
+circle
+cooks
+circumlocutions
+adventists
+barringer
+affording
+anatomically
+basements
+barbarities
+configuration's
+contributes
+collaborating
+beach
+comet's
+bakes
+assigns
+ballerina
+cheapens
+clinging
+conquered
+bisecting
+closenesses
+bugle
+boatmen
+beatings
+complicator
+bight's
+banister's
+archaic
+anthropologists
+clams
+beginners
+committee's
+communicants
+alone
+bounteously
+bastes
+ascertain
+alphabetical
+bringing
+batters
+amazon's
+constituent
+benders
+being
+constitutionally
+audiometric
+blast
+copings
+bailiffs
+colts
+coolies
+airlift's
+boomerang
+bifocal
+clothes
+cashiers
+congenially
+billows
+boilerplate
+biochemistry
+betting
+brimmed
+complementers
+breading
+bragger
+adducting
+bisectors
+abrogates
+criticized
+comrade
+bucolic
+birthright
+blurs
+challenger
+complicated
+bluebonnet
+biscuit's
+classmates
+campus's
+boundary
+bedbug's
+adjustor's
+acre
+bicycling
+awe
+additions
+baiter
+authorizes
+beautify
+copier
+buffet
+belfries
+acquisitions
+brooch
+crickets
+caterpillars
+beefsteak
+complicating
+bedpost
+criminal
+celebrity's
+bookseller
+christened
+coerces
+clamors
+all
+boatyard's
+canoe's
+begin
+anaerobic
+bushing
+agreers
+concedes
+countermeasures
+beg
+agglutinin
+bunted
+ammonium
+aspiration's
+bathrobes
+changeable
+beached
+bestowal
+beaner
+catsup
+admires
+clockwise
+agile
+alarms
+ached
+chinks
+buffer's
+cartesian
+annunciate
+chanticleer
+avenue
+anchor
+alliterations
+blanking
+bargained
+breathtaking
+crime's
+assiduity
+argentina
+contiguously
+aqua
+bested
+borderlands
+appetite
+captive's
+bipolar
+conceal
+counters
+costumed
+arrestingly
+bunting
+blight
+champagne
+brusquely
+address
+bloodhounds
+associative
+creed
+arithmetical
+balustrade's
+belabors
+complementing
+checkout
+archivers
+badlands
+behaviors
+ampoules
+bridgehead's
+antiquarian
+clumsiness
+considerable
+apportions
+anglicans
+appealingly
+barfly's
+absorptions
+awards
+congregates
+cloister's
+armour
+avoid
+correctively
+chucks
+burps
+bums
+berry
+batches
+administration
+atones
+bishop's
+blonde's
+casualty's
+cores
+bodied
+alter
+assonance
+apprise
+antitoxin
+avariciously
+checkpoint's
+affirmative
+conjures
+angstrom
+aesthetically
+canyon
+binge
+crazed
+breastwork's
+aids
+boston
+conceits
+announcement's
+beechen
+accessory
+authorities
+constrained
+automation
+anaplasmosis
+commander
+commendation's
+belabor
+cornfields
+artemis
+asphalt
+contracted
+brochure
+crafted
+allegedly
+alien's
+auditory
+blowfish
+adducible
+confederations
+annuals
+britches
+acquaintance
+appallingly
+abounds
+burglarproof
+crossers
+bayous
+brisk
+authority's
+covetousness
+averse
+accomplished
+aromatic
+admiral
+bijective
+avenging
+bran
+boatyards
+beseeching
+challenging
+bares
+acts
+abductions
+compendium
+compulsion's
+calendar's
+clad
+blockage
+conventional
+craze
+cajoling
+acceptability
+bungalow
+buff
+cramps
+attackable
+calculator's
+asp
+braved
+colors
+balling
+contaminate
+crackling
+comes
+complimenters
+across
+astronomy
+aborigine
+bobwhite's
+autopilot's
+chattered
+appall
+autonavigator
+bashed
+acoustics
+beachhead's
+apartments
+convenience
+blackout's
+bands
+autonomously
+amounters
+centripetal
+achievable
+astringency
+attuned
+concatenating
+copyright
+coding
+assumption's
+anastomoses
+confiscate
+asking
+beneficial
+adhesions
+busboy
+bronzes
+audacity
+bruises
+crash
+beau's
+circuit's
+aborts
+baubles
+beliefs
+assuaged
+costed
+blinking
+characterized
+bowled
+block
+conquests
+confesses
+amusers
+ceiling
+berets
+berliner
+abstentions
+child
+authoritatively
+closeness
+bushel
+considered
+communicates
+cheerlessly
+autofluorescence
+aquarium
+affects
+appurtenances
+airbag
+approaches
+admonishments
+bets
+bounden
+courtly
+bodybuilder's
+campus
+brainstorm
+americans
+chairperson's
+botanical
+askew
+amazon
+bleed
+clime's
+cooperations
+commonness
+boatloads
+blinked
+courtyard
+adapted
+aforethought
+backwater
+burr
+cathode
+awaking
+buzzed
+bridgeable
+arrives
+adventuring
+beseech
+attrition
+copied
+colon
+client's
+bandstand's
+advice
+baptistries
+antithetical
+alcohol's
+contradicting
+ambidextrous
+belches
+category
+bluntness
+coupon's
+assimilations
+comfortable
+caller
+affliction's
+attends
+compactest
+baler
+beacon
+blind
+bleakness
+beseeches
+courts
+couch
+consequential
+adulterers
+craving
+biggest
+astray
+bigoted
+barfly
+charges
+ambiguity's
+commentary
+crankily
+cowerer
+carnival
+bachelor's
+bituminous
+continuance's
+calamities
+claws
+apiece
+century
+ascendancy
+charts
+animations
+aggression
+chickadee's
+carve
+confidence
+actor
+bubbled
+becalming
+convulsion
+chivalrous
+brightest
+centralized
+beautifies
+amateurishness
+birthrights
+alligator
+circumstantial
+constructors
+conceptions
+arranging
+cart
+cent
+ager
+congruence
+carrot
+chariots
+cloudier
+captivity
+conquerers
+compartmentalizes
+condensing
+celebrities
+chalks
+accordance
+chilled
+conversations
+apples
+conceiving
+average
+blessed
+creator
+ant
+cling
+annoyer
+aviation
+cohesively
+correspondences
+boor's
+apprehended
+bessel
+both
+characterizes
+bards
+cots
+acculturating
+cemeteries
+carting
+alcohols
+bitterest
+ascetic's
+conducts
+caking
+airspace
+autocrats
+ashes
+chimes
+broadcaster
+commuter
+basket
+borderland's
+broadened
+boyish
+allegretto's
+ban
+bidder
+christen
+blessings
+bury
+arranged
+choir's
+apathetic
+boring
+aryan
+appearing
+binds
+cooperates
+bounces
+airspeed
+complicators
+adapting
+babbled
+agglomerates
+bedraggled
+addictions
+bolt
+calmly
+blur's
+boatload's
+anesthetic
+bugs
+colt
+completing
+boxer
+billers
+affronting
+absurdity's
+chides
+comparatively
+braided
+clipper's
+cot's
+calves
+articulations
+branchings
+attraction
+concatenates
+alligators
+cake
+boom
+crashing
+afar
+abler
+beamed
+adverse
+adrenaline
+agriculture
+beehives
+crankier
+courthouses
+advises
+consigns
+bisect
+azimuth's
+carpets
+arthropod
+brewery's
+commonalities
+altruist
+astride
+appreciate
+carved
+briefs
+admitter
+celery
+congregate
+clocking
+assassinated
+adding
+canvasser
+civics
+contemptuously
+calculates
+advisees
+bumbling
+algorithmically
+cloudy
+algebras
+addiction's
+cop's
+assurers
+confidently
+affector
+analyzers
+chimneys
+burdening
+antitrust
+admix
+avoidance
+choking
+coexists
+accustoms
+cellar
+anchovy
+constructor's
+confinements
+consequently
+accelerations
+accoutrement
+churchman
+biller
+affected
+brigades
+cremating
+corridor's
+bagging
+ah
+berating
+collective
+acuteness
+arrestors
+cab's
+border
+agitation
+animism
+arches
+alveolus
+cessation's
+averrer
+abash
+counterrevolution
+attesting
+animateness
+bawdy
+americana
+bloodstained
+applicator
+annotating
+annunciator
+clamored
+acting
+aerosols
+axiomatization's
+brags
+coalesces
+avocation
+combining
+crazily
+bravery
+burying
+adored
+airfield's
+accounting
+broadeners
+anise
+chimney
+added
+avenges
+bellicosity
+cranberries
+arsenic
+communities
+comparable
+bunkered
+architect
+alphabetically
+beautified
+apogees
+communist
+anatomical
+complexity
+accost
+autographing
+browsing
+ameliorate
+bookers
+bandaging
+clinical
+appellants
+counteract
+clairvoyantly
+bootstrap's
+canner
+boastful
+attainer
+ash
+beaded
+brake
+barest
+befriend
+burglarproofing
+allegorically
+bunts
+believes
+accession's
+buck
+boathouse's
+byword's
+anthracite
+accuse
+conjunction
+burping
+commandant's
+creativity
+affirming
+bark
+amuses
+balcony's
+auditors
+counsel
+clamber
+borates
+cowboy's
+bickered
+boors
+combing
+biting
+breeze
+crowder
+corn
+bloke's
+bombast
+bookstore
+blared
+bedlam
+carbohydrate
+coops
+bundles
+blistering
+antarctic
+anterior
+bilinear
+chocolate's
+context's
+alternating
+annoyance
+constancy
+ambivalently
+buddy
+brutalize
+bobbin
+alleles
+commotion
+attributes
+airborne
+creed's
+bolstering
+coaxed
+airframe
+breaker
+accept
+abashes
+attentional
+contributor
+comparability
+auscultating
+cocked
+computationally
+buffered
+career's
+analyzable
+absently
+courtyard's
+buildups
+apportioned
+balkanized
+annulling
+cremation
+buffetings
+conditional
+confided
+airliner
+bulldozer
+approaching
+anagram
+apollonian
+canaries
+bloat
+bluebird
+collision
+cool
+connectedness
+abasement
+artisan's
+avoidably
+clerks
+afflict
+briton
+corroborates
+cameras
+counted
+boldest
+burglars
+brutes
+brows
+abhorrent
+configuring
+averaged
+ace's
+buying
+abandon
+bayou
+cottons
+auditioning
+amplifies
+clippers
+brainstorm's
+alto
+brutalities
+bunch
+agricultural
+bursts
+blunting
+archer
+activity
+carefulness
+bedroom's
+concomitant
+balm's
+artificer
+barking
+breathy
+babies
+acacia
+bodies
+cap's
+criticised
+conversed
+crewed
+ascendant
+budgeting
+coroutine's
+charmed
+bellboy's
+conservatism
+butler
+acculturation
+conclusion's
+adapt
+cellist
+contempt
+adumbrates
+borrowed
+confounds
+allegiance's
+blabbermouths
+accrues
+captor
+coop
+baseballs
+cottages
+apartment's
+assertiveness
+assent
+artfully
+bagger's
+abolishment
+acetylene
+accessory's
+blackbird
+baptist's
+consist
+cavern
+buttock
+corporal's
+autoregressive
+bailiff's
+birds
+corder
+bracketing
+antlered
+barbiturates
+county's
+addicted
+agglutinated
+abashed
+competitively
+captains
+bloating
+accepts
+choose
+ashamed
+backyard's
+apiary
+contradiction
+balalaika's
+arctic
+broom
+anvils
+coffee's
+alliance's
+agitator's
+change
+adjusters
+cremates
+complexes
+bodyguard's
+burl
+antithyroid
+ambient
+airfoil
+apricots
+athleticism
+abjectly
+bankrupts
+answerers
+alternatively
+confronter
+breaking
+baronial
+cannibalized
+appetites
+breaded
+blackboard's
+battlegrounds
+cosine
+barrenness
+abbreviation
+budging
+boolean
+acrobatics
+again
+ashtrays
+clashed
+contingent's
+compulsion
+bedazzled
+collapsing
+comparison's
+businesses
+compassionately
+achievement
+buffering
+candlesticks
+austerely
+awls
+associate
+absolved
+annexed
+airway
+clipping
+counselors
+conscience
+attempters
+constructing
+biases
+cautioners
+comma's
+cosines
+char
+auscultates
+afire
+comely
+amity
+beverage's
+anew
+ballplayer's
+adulterated
+authorship
+alterers
+burdened
+attributive
+afflictions
+blinded
+barrier's
+attachment
+brotherhood
+bridegroom
+atoms
+cobweb's
+copes
+controversies
+complexion
+crawling
+atomized
+adjust
+accuracies
+concern
+cinders
+authorization
+appraisingly
+bladder's
+cooked
+cowers
+batter
+commissioner
+close
+burglar's
+allocated
+anvil
+aftershock
+abrogating
+chemistries
+advisable
+conduct
+committee
+blaring
+appalling
+braveness
+alertly
+artificialities
+brevet
+collision's
+arizona
+bower
+creamers
+awnings
+arsenals
+crane
+city
+contemplative
+catheters
+administrators
+attorney
+churned
+attractions
+columnation
+bobbed
+centipedes
+bostonian's
+apprises
+buries
+allege
+botulism
+adobe
+ambassador's
+covenants
+boon
+asynchronously
+bigness
+axial
+chaffing
+battleships
+ant's
+anthropological
+accent
+brushing
+brassy
+consumptions
+battleship
+absorb
+beckons
+brook
+connectors
+clinches
+accesses
+beaters
+archaicness
+bursitis
+chided
+bomb
+assimilated
+addicts
+convening
+arianists
+counting
+altar's
+confusions
+attachment's
+clipping's
+amazing
+corset
+bossed
+attach
+commandingly
+animatedly
+allegations
+assuages
+annulment
+compress
+aptitude
+absurdities
+autobiographic
+aspect's
+concentrator
+burgesses
+anagrams
+bedeviled
+assemblers
+convinced
+commentary's
+agglomerated
+biological
+callousness
+axolotl's
+atmospheres
+authoritarian
+cancer's
+above
+charting
+aldermen
+battler
+cistern's
+bouncer
+amassed
+conquest
+altering
+arrogantly
+brokenly
+comparator
+counsellor's
+attenders
+cackle
+criticize
+authored
+ably
+believed
+compelling
+accepter
+cleansed
+afflicted
+backslash
+computed
+almighty
+attache
+braes
+carriage's
+benediction
+brigadier's
+contemporariness
+boomtown
+amplitudes
+breakwaters
+clod
+catch
+bar's
+activist
+caves
+assenting
+camp
+attainments
+brotherliness
+continuances
+appearance
+applicator's
+browbeats
+banjos
+addendum
+became
+adduces
+armadillo
+brothel
+almanac
+courageous
+assault
+chunk
+coaching
+atheist's
+blunted
+aperiodicity
+congresses
+boastfully
+burglarproofed
+broadest
+bashfulness
+affect
+acne
+bottleneck's
+criticisms
+corrupts
+colonized
+closeted
+canonicalizing
+auditorium
+antenna's
+awfully
+anti
+consumes
+agonize
+algebra's
+championing
+blush
+bugger
+antagonize
+beethoven
+blase
+boycotts
+compensatory
+bugged
+boroughs
+anatomic
+batons
+arguably
+affricates
+appreciations
+cavalry
+alumna's
+arcing
+backpacks
+braces
+contextual
+coupon
+chillingly
+allocates
+abuts
+contribution
+commodity
+admonishing
+coolly
+cabinet's
+collapsed
+confessions
+adjured
+capriciousness
+chastising
+babe
+aerodynamics
+accepting
+concept
+contour's
+consequentialities
+birthday
+bankrupted
+birthed
+benefit
+concentrations
+azalea
+channels
+chestnuts
+contenting
+antedate
+censors
+contagious
+abbot's
+channellers
+apt
+commend
+avocation's
+admonition's
+abolition
+confederation
+carried
+clumsy
+coincidences
+bumper
+burr's
+bugles
+bribers
+attainably
+consume
+comma
+creativeness
+accuser
+bombs
+abbey
+baffled
+aside
+clip's
+appeases
+compass
+bundling
+abstractionism
+confide
+creases
+apropos
+confronted
+corrective
+concurrencies
+autocratic
+alien
+attending
+antagonistic
+broadcast
+asymptote's
+belied
+breasts
+contrapositives
+coiner
+accordingly
+cohering
+computers
+cow
+bibs
+ancestral
+controller
+attacker
+alerts
+coconut
+agency
+alerted
+alcoholism
+ammoniac
+actinometers
+acquitter
+bud
+cessation
+alleging
+centralizes
+articulators
+council's
+carvings
+arduously
+blown
+anode's
+arrogate
+bisects
+centimeters
+burgeoning
+course
+appointee's
+ascribable
+communicate
+contrivance's
+adoptions
+attune
+acres
+abyss's
+corporal
+certifiers
+analyze
+augusta
+bestseller's
+checkpoint
+coexist
+attainers
+argon
+bearded
+crudeness
+averaging
+brick
+adducing
+annulment's
+chicks
+blocked
+cisterns
+afoul
+affiliates
+briskly
+adhesion
+ascertainable
+appeasement
+blueprints
+agreements
+blindfolds
+communicator
+characterization
+annoyances
+breeches
+brushed
+clinic
+competes
+chuckled
+cradled
+balmy
+antisubmarine
+alternate
+armpits
+barn's
+conjuncts
+adhere
+allows
+counteracted
+appetizer
+capturers
+cleanse
+avant
+abbe
+corpse's
+arduousness
+badge
+begets
+contemplated
+caveat
+copiously
+athena
+aggrieving
+alibi
+accumulation
+basket's
+aftershocks
+bass
+conjuncted
+chaps
+brunch
+colonials
+bibbed
+clusters
+antagonizing
+constituencies
+combings
+bearish
+continuously
+adequacy
+brow's
+catalog
+alderman
+comedic
+chemists
+concernedly
+conceded
+alarm
+arced
+buckle
+confidingly
+coherent
+closes
+buffoon
+brace
+adjustably
+crackers
+contamination
+burgess's
+aerobic
+constitutes
+baptismal
+broadness
+blimps
+concatenation
+claiming
+bard's
+aerosolize
+adjoins
+copies
+coats
+boggle
+corroborated
+concreteness
+bill
+cautions
+bantam
+bearably
+armchair's
+birthright's
+cravat's
+cone's
+courtiers
+asunder
+bulletin's
+biopsies
+alley
+contrive
+blasphemies
+amuser
+ballerinas
+blushed
+causticly
+brandy
+blinkers
+complimenting
+crimsoning
+angola
+apprehensiveness
+bolster
+columnate
+byproducts
+berths
+accusal
+chubby
+arrived
+camps
+anaconda
+cook
+airfoils
+atlantic
+boosted
+converge
+availer
+blemish's
+appalachians
+coffin's
+boarding
+alga
+crouch
+columnizing
+consul's
+chastises
+angling
+apple's
+billiard
+attentiveness
+adroit
+apprehensible
+cereal
+blouse's
+browning
+bodybuilder
+coaxing
+assertion's
+connective's
+commemorated
+accountability
+crooked
+blips
+chandeliers
+aristocracy
+bangs
+coke
+abutment
+community
+calculus
+congregated
+crepe
+compromised
+airlines
+contributing
+contingencies
+coordinated
+alginate
+batted
+contender
+alma
+antagonisms
+accompanied
+airport
+administrator's
+appraisal
+breadbox
+condemnation
+backlog's
+available
+consequents
+crooks
+commonwealths
+barring
+channeller
+crucially
+archaeological
+charming
+adventist
+credits
+appetizing
+breads
+clients
+climbing
+aloneness
+abstractness
+appearer
+astute
+clockers
+antagonizes
+agonized
+bastard's
+conjectured
+aqueducts
+aureole
+boatswains
+conjured
+chauffeur
+complementer
+behold
+bustards
+bivouac
+cluck
+anus
+bless
+catastrophic
+bounty's
+allowed
+answer
+concealers
+brainchild's
+coercion
+buzzword's
+bordellos
+appertain
+applier
+couriers
+aesthetic's
+craft
+capacitances
+capped
+coupler
+category's
+anvil's
+conquest's
+checksums
+clucking
+bronchus
+acrimonious
+changeably
+accenting
+argued
+conditioning
+brewing
+backwardness
+cascaded
+atomize
+contours
+arianist
+apart
+conflict
+carefully
+banshee's
+conveys
+arbitrates
+amphitheater's
+amen
+alimony
+bound
+buzz
+courtroom
+apparently
+coalescing
+circulating
+amounter
+bypasses
+breadth
+choral
+completion
+arisen
+anticipating
+bilges
+contractions
+bedspring
+commune
+blacklisted
+beagle
+alkaline
+atolls
+carelessly
+blimp
+corking
+brevity
+alterable
+canada
+bear
+bluntly
+cartridges
+connoted
+countries
+corroborate
+consecration
+corrupted
+appreciating
+combatant's
+alkalis
+affecting
+blues
+casserole
+ballad
+bewitches
+common
+as
+because
+bathroom's
+anchorages
+beguile
+connect
+convenience's
+counteracting
+assorted
+care
+contains
+centimeter
+ancestors
+briefings
+busses
+churchyards
+breakable
+amortizing
+courthouse's
+click
+courses
+ajar
+county
+covet
+confidences
+capitalizer
+agog
+backtracking
+copious
+bestsellers
+chilliness
+bringer
+browse
+centipede
+bawled
+bricklayer
+breath
+assailants
+abysses
+command's
+characterizer
+calculating
+america's
+aurally
+contain
+alias
+commentators
+confounded
+appending
+accidents
+chatters
+coordinates
+bleeder
+blueness
+badger
+bolsters
+astounding
+capitalist's
+conservation's
+commences
+aimed
+bun
+comparators
+competition
+bauble
+audiometry
+affinity's
+amalgamates
+cowardly
+consolidating
+beads
+brackish
+bookings
+accuses
+bog
+compartmentalizing
+clutching
+calming
+collars
+clambers
+banqueting
+beaked
+authoring
+correspondence
+apostrophes
+affirmation's
+bespeak
+costing
+brought
+backbend's
+bled
+assassinate
+chop
+anemometer's
+cobbler
+coldness
+complainer
+battalions
+asymmetry
+boathouse
+canyon's
+awarded
+amplitude
+anarchical
+anticipatory
+bolder
+cooperatives
+caterer
+adviser
+balkanizing
+augur
+cannibal's
+balustrades
+attaching
+collector's
+commercials
+capaciously
+coincidence's
+bumps
+ascot
+bale
+blackmail
+baby
+aftereffect
+bloomers
+buttresses
+avenues
+climaxes
+aqueduct
+cater
+brainchild
+avail
+bypassed
+bowl
+california
+cements
+boxes
+brained
+bedevils
+captors
+acuity
+ascends
+breakthrough's
+assigner
+caner
+bequests
+ceilings
+axers
+bookshelf
+autistic
+celebrations
+axons
+chiding
+asterisk
+allophonic
+blindingly
+cherubim
+boaster
+confining
+anxious
+clowning
+advisement
+approach
+anesthetic's
+crescent
+alertedly
+birdbath
+beardless
+bras
+auspices
+choosers
+approval's
+afflicts
+corrosion
+arpeggio's
+bodyweight
+cranky
+battlefront
+affirmation
+churchyard's
+aeroacoustic
+anders
+adjustment
+baneful
+citation's
+acetone
+blend
+binuclear
+boner
+annotation
+announce
+claimable
+contemporary
+clothing
+acquitting
+choosing
+attacher
+bananas
+binaural
+arrestor's
+aches
+conclude
+collaborators
+await
+blaspheme
+bequeaths
+crows
+balconies
+begging
+conducting
+abstracts
+assignee's
+causations
+approximation
+articulated
+considerably
+apricot's
+afferent
+assertively
+bonding
+calms
+cranberry's
+cost
+captaining
+agenda
+corridors
+complaint
+christens
+aggravate
+countess
+arbitrators
+ascribing
+breech's
+bellwether's
+burglarized
+confinement's
+animating
+adjectives
+cannister's
+bemoan
+cleanest
+acme
+cheapest
+activities
+allophone
+boy
+belaboring
+captions
+compactor's
+actuator's
+befouling
+arachnid's
+computerizes
+compile
+absorption
+bridled
+absorber
+convicts
+birch
+alkaloid's
+cannot
+bacilli
+charitableness
+abated
+ceaseless
+beavers
+bookshelves
+commensurate
+appreciates
+basil
+cartoons
+aides
+buxom
+cages
+cantor's
+acceptances
+antiquated
+amalgamate
+babyhood
+beers
+conforms
+bouquets
+canner's
+baste
+cashed
+argue
+butcher
+backbones
+absolve
+crib's
+cafes
+abstracted
+book
+committees
+authentically
+conference
+antisera
+bourgeoisie
+attribute
+biddy
+autobiographies
+chivalrousness
+coverlet
+ambiguously
+calorie
+anhydrous
+alignments
+around
+archfool
+advance
+bedpost's
+affective
+contained
+amain
+bromides
+clogs
+bricker
+arduous
+consistent
+amidst
+confess
+complain
+anniversaries
+coasting
+cobwebs
+aries
+benchmark
+aviaries
+bombard
+boxers
+ashtray's
+assyriology
+blaze
+ablative
+chaos
+burro
+arguer
+ashamedly
+crier
+allocator's
+aggressively
+carts
+advisory
+airship
+alkali's
+backup
+chaining
+continue
+cartoon
+circumference
+breadwinners
+autonomy
+banking
+armored
+cabin
+chunks
+antigens
+blistered
+airers
+breakaway
+belief's
+belays
+coveting
+auburn
+careful
+anybody
+bumbled
+cautious
+adopter
+ballplayers
+anteater
+citadel's
+avails
+agent's
+caliphs
+bridgehead
+already
+caterpillar's
+coachman
+centralizing
+alphabet
+concede
+barbell
+breadboard
+ballast's
+activators
+attendance
+blandly
+calculator
+codeword
+addressee's
+avenue's
+alcoves
+alternately
+admonishes
+concentrate
+crossbars
+adjoining
+basset
+carbons
+beast
+blonde
+castle
+clarification
+bitch's
+abrasion's
+books
+amputate
+bicycler
+aphonic
+arraigns
+acquiesce
+buster
+chaperon
+advisements
+buyer's
+attack
+birthdays
+blazed
+confuser
+crag
+ballet
+airports
+bison
+counterexamples
+arteriole
+colony's
+adamantly
+blunders
+chivalrously
+adult's
+authors
+amplifiers
+counterfeited
+complicity
+astrophysical
+axolotl
+bash
+battleground
+butterfly's
+axioms
+allegory
+blitzes
+blindfold
+bufferrers
+approximating
+byways
+computations
+alight
+avoiding
+assurance's
+barrages
+canonicalized
+callously
+auditing
+authenticating
+bag's
+asters
+artistic
+bonanzas
+applaud
+certainties
+auto's
+concession's
+cascade
+chubbiness
+churchyard
+afternoons
+antigen's
+baron's
+amphibian
+banister
+capitalize
+approval
+appropriated
+bureaucrat's
+covets
+cloisters
+circulate
+bivalve's
+beta
+collector
+among
+cane
+birdlike
+attenuating
+conjunctions
+appliance's
+coral
+crucify
+abnormal
+combined
+classroom
+buckskin
+commissions
+abolishments
+arching
+croak
+americium
+associates
+car's
+assuringly
+agreer
+anticoagulation
+closure's
+corkers
+attend
+alphabet's
+awakening
+composedly
+attracted
+construed
+cricket's
+applicability
+autonavigator's
+chloroplast's
+ashen
+beggars
+corporation
+another
+conflicts
+bootlegs
+archeologist
+alcove's
+agitates
+cargoes
+creditor
+cops
+advisably
+coronation
+bourgeois
+crochets
+cropper's
+cramp's
+adulterer's
+corroborations
+changing
+combinatorics
+calm
+comprehensible
+blooms
+coolness
+copying
+blacksmiths
+commodore
+compulsions
+clump
+afterward
+crucified
+brooder
+buckets
+accelerating
+accented
+boat
+adventitious
+baseline's
+courier
+calamity's
+atoll's
+brutalizes
+bundled
+chairperson
+cheeses
+continuation
+celebrating
+apologists
+behest
+bumpers
+consonants
+circulation
+betraying
+commuting
+breezily
+circumstance
+coughing
+benefiting
+conquerors
+chemically
+commencement
+adjustors
+angel
+congratulate
+conspired
+causally
+bud's
+conquers
+augmented
+bereaving
+advisor
+articulation
+angler
+admission
+bide
+competitors
+amusement's
+collecting
+adder
+arithmetized
+cheek's
+apostrophe
+blockages
+clockwork
+bubbly
+apricot
+adjudicated
+banter
+amused
+breacher
+bracketed
+aimer
+comprehending
+bunkers
+canton
+arcane
+absent
+capitol
+consequence
+cognitive
+abjuring
+clever
+coronet
+anathema
+artichoke
+controls
+credulous
+acid
+crawled
+coupled
+boomtowns
+aspen
+acted
+anyhow
+burdensome
+backdrop's
+apocalyptic
+cornerstone's
+cautiously
+blisters
+conveniences
+arbor's
+accessories
+alleges
+clubs
+accompaniment
+blazes
+annually
+clique's
+beamers
+ballgown
+autumnal
+acreage
+conjunct
+balances
+consoling
+canvas's
+competent
+aggrieves
+although
+afraid
+clearly
+cognizance
+acoustic
+colleague
+causing
+absences
+closers
+airs
+cinder
+adversaries
+altruistic
+brews
+ceremonially
+appraisal's
+commissioners
+army's
+assists
+acceptor
+comparison
+cooling
+conveniently
+couching
+changes
+clinic's
+confronting
+adjunct's
+blandness
+alternates
+bunter
+consequent
+clean
+autos
+accumulators
+carver
+aprons
+awful
+bobbins
+blasphemy
+assuming
+abscess
+assemble
+cabinet
+atomics
+blacklists
+audacious
+assay
+anthropology
+barnstorm
+awl
+bumping
+assembles
+capture
+compensates
+coverable
+amend
+array
+continually
+absented
+cigarette
+antiresonance
+backspace
+branched
+appellate
+courtroom's
+alienated
+austerity
+cement
+asked
+antelopes
+cottager
+bluebonnets
+booze
+amendment's
+backslashes
+begun
+bijections
+cafe's
+boatload
+collect
+appeals
+belittles
+befit's
+beauty
+arrogated
+academia
+contagion
+blemishes
+coverlet's
+comfortability
+antecedent
+controllably
+congressman
+complicate
+coincide
+arrears
+clumped
+credited
+buffoon's
+catholic
+accompanist
+beauty's
+aster's
+blatantly
+bothering
+bewilder
+canceling
+carbonizer
+accentuation
+backstairs
+anticipations
+bestowed
+civilian
+blooming
+blunts
+airlocks
+argo
+blueprint
+aristocrat
+cakes
+complements
+ale
+camping
+army
+adrift
+bengali
+barely
+blasphemes
+briefcase
+brooches
+ailments
+blazers
+crevice's
+bankrupt
+archiver
+articulator
+alphabets
+bonds
+colliding
+candidate
+cashier's
+bellwethers
+airstrip
+announcers
+calendars
+corrupter
+aqueduct's
+axiom
+bathing
+blusters
+ascribed
+admittedly
+angrily
+analytical
+contraption
+convertibility
+abysmal
+cathedral's
+aversion's
+algol
+articulately
+breveted
+bickers
+chatterer
+adoptive
+bijectively
+cloudiest
+coarseness
+carted
+cocktail's
+capacious
+anion
+buffoons
+bleeding
+bedrock
+adventurer
+compositions
+camouflages
+brittle
+chip's
+aloe
+chorus
+cargo
+critical
+biographer's
+abject
+blasphemousness
+charmer
+betray
+blacking
+awoke
+allele
+bags
+claimant
+clover
+biographies
+confound
+advertises
+crafter
+cripples
+bygone
+concentric
+couldn't
+contentions
+acrid
+costume
+aft
+aesthetic
+bandits
+adducts
+constellations
+coffer's
+created
+commercial
+art's
+cookie's
+ammonia
+adjunct
+articulateness
+congratulated
+crags
+brandishes
+annual
+byword
+affection's
+college's
+aboriginal
+bikini
+buttering
+allotter
+console
+advent
+activates
+beverage
+april
+acceptable
+barrel's
+boys
+attractor
+azimuth
+critics
+ballooner
+aren't
+adulterating
+criticise
+abeyance
+automatically
+collaborative
+capabilities
+crawls
+anomaly's
+climaxed
+animately
+aroma
+belie
+attires
+argumentation
+baseboard
+bluebirds
+cactus
+byproduct
+balancer
+beholder
+conservationist's
+betrayer
+agony
+accusingly
+convict
+coaxes
+breeds
+agitated
+championship
+brevets
+auscultate
+counselling
+cornerstones
+america
+canoes
+aspirator
+compensate
+antiseptic
+bereave
+absinthe
+compose
+collide
+alabamian
+candid
+civilized
+clamps
+authoritarianism
+colonist
+bugging
+bins
+abashing
+battlers
+canning
+berate
+assembler
+amateurish
+boasted
+angriest
+bluffs
+colonize
+balcony
+bleat
+bustard's
+attenuate
+contagiously
+bicep
+babel
+beatniks
+brush
+analogy's
+audiologist
+assessment's
+camera
+arbitrary
+alleyway's
+concession
+constructions
+accompanies
+accretion's
+aroused
+charcoaled
+belated
+bottom
+bloodshot
+bisques
+advocate
+arabs
+cathodes
+adamant
+challenge
+absurdly
+abolitionist
+cleavers
+bludgeons
+bassinet
+clause
+coiling
+cask
+boob
+azalea's
+afghanistan
+carriages
+blade's
+bobby
+asinine
+acclaiming
+absorbed
+blacken
+cheating
+bootleg
+anonymous
+addict
+astonishes
+awry
+adequate
+categorization
+casks
+blaster
+aspirants
+abscesses
+airing
+assumptions
+capitalists
+board
+asynchronism
+body
+aye
+contraction
+athens
+arsine
+cohabitations
+below
+bows
+aviator's
+ampoule
+connective
+adapter
+authenticate
+blackboard
+brilliant
+appoints
+attics
+conquer
+boning
+comestible
+camped
+blonds
+aisle
+coals
+billboards
+characterizers
+crow
+clout
+admirer
+actuarially
+abstruse
+accessing
+bonfires
+clenched
+characteristic
+catching
+chars
+canons
+barrier
+championed
+butterflies
+completely
+calendar
+artwork
+abjections
+burgher's
+correlates
+arrivals
+accepters
+circuses
+breadboards
+accomplishment
+analyzed
+appropriates
+cancel
+bordering
+aperture
+civilizing
+assortments
+blackest
+blitz's
+copy
+commenced
+admirers
+cheers
+croppers
+cliff's
+circumstance's
+bibles
+buttressed
+consecutively
+birefringence
+automaton
+cheerless
+chopping
+ballooned
+convent
+acknowledgers
+appointing
+belies
+comeliness
+bangle's
+communication
+bisector
+avocations
+clique
+brainstem
+campusses
+allocators
+bramble's
+assaults
+commemorate
+appendix
+agent
+apportioning
+bottled
+artifact's
+block's
+archery
+bagatelles
+candies
+catched
+cognitively
+creepers
+concentrated
+bout
+balustrade
+abodes
+carrying
+confirming
+cannibal
+chinners
+carbonate
+anguish
+butt
+colons
+ablated
+corporation's
+cock
+convincers
+beret's
+bluish
+compressive
+authenticates
+commemorative
+bureaucracies
+coinage
+coach
+assigning
+concentrators
+capitalizing
+appraisals
+belaying
+candy
+blossomed
+bricks
+atonal
+analogue
+caters
+barbaric
+applique
+clink
+audio
+actress
+assyrian
+apprehension
+conversation
+apsis
+bedevil
+comics
+affricate
+comings
+buttress
+angering
+buckboards
+bombed
+adversely
+adequacies
+commended
+causeways
+adherers
+codes
+aquaria
+ape
+bulks
+compactly
+brainwashes
+bleats
+commandants
+conditionally
+adjourns
+clobbering
+allowances
+buildings
+complemented
+blanker
+algeria
+brief
+creak
+adductor
+categorizer
+approacher
+argument's
+clocked
+bedazzle
+cause
+coordinator
+buildup
+countenance
+abhorrer
+backtracked
+bogus
+closer
+broilers
+chirps
+adjournment
+belles
+bitingly
+befogged
+contexts
+amorous
+breeding
+abortions
+blockage's
+alternatives
+bouncing
+beryl
+ballistics
+banters
+carpenters
+auction
+bowdlerizing
+brazen
+bonuses
+circulated
+adultery
+archival
+bears
+baptized
+burglaries
+borrowing
+barbarous
+casher
+adolescents
+atrophic
+busily
+aerating
+coatings
+athenians
+casing
+consuming
+alphanumeric
+beaches
+bisection's
+conjecturing
+aspirate
+biography's
+accompany
+bureaucrat
+broomstick's
+colony
+coalesce
+clock
+bequeath
+collaborates
+belonging
+configured
+burlesques
+anode
+consenter
+bug
+counterpoint
+counts
+bangladesh
+analogical
+accident
+bulky
+affinities
+abysmally
+boorish
+assiduously
+cannisters
+autocollimator
+bassinet's
+barrelling
+blurts
+carbonize
+candle
+act
+addressees
+constraints
+boast
+complaining
+coziness
+avocado
+coolest
+blank
+beadles
+anytime
+covetous
+appellant's
+angers
+academies
+ageless
+chased
+constitution
+consonant's
+boosting
+ascetics
+aerosol
+apse
+blushes
+clang
+confers
+confidentiality
+coolie
+colon's
+chickadees
+badminton
+argonaut
+constituting
+aloha
+contracts
+broomstick
+brackets
+attendant's
+connection's
+conciseness
+abstractor's
+composes
+chaste
+assures
+conjuring
+barbital
+bunion
+bases
+clowns
+barrelled
+audience
+auctioneer
+complexly
+aviator
+conjectures
+backscatters
+cheerfulness
+communicating
+agreement
+bricklayers
+bilabial
+abstruseness
+cobol
+cooperating
+admit
+blundering
+accelerates
+assaulted
+concealing
+anachronism
+bowels
+butane
+anniversary's
+converts
+convoyed
+climates
+barriers
+clubbing
+additives
+bask
+confessing
+caravan
+colonizes
+continuous
+cheerlessness
+boggled
+armpit's
+bridgework
+allegro
+cricket
+cannon
+adoption
+clanging
+auscultations
+billowed
+alphabetize
+airlift
+appointee
+boyfriend
+chaotic
+corrections
+bonus
+contrasted
+convulsion's
+confessors
+adumbrating
+autocrat's
+coronary
+authentic
+barley
+brawling
+aegis
+appends
+bolshevism
+charted
+applicant
+aileron
+considers
+chin's
+alkyl
+amendment
+boulevard's
+avian
+breather
+canyons
+cannon's
+apportion
+badgered
+augers
+advisers
+censuses
+beveling
+aught
+arthogram
+anonymity
+appliance
+atmospheric
+anesthetizing
+ambulances
+blustering
+burnt
+chestnut's
+collects
+aliment
+anxieties
+championship's
+channeled
+arrival
+amassing
+corpse
+bedtime
+blackbirds
+cats
+constants
+chemistry
+brewery
+brother's
+boasts
+accentual
+bellwether
+bely
+courted
+baroness
+configure
+collection
+aviary
+achieves
+belfry's
+beech
+baseman
+bacterial
+contestable
+blond
+contracting
+comparably
+consultation's
+booster
+conspiracies
+belief
+candidate's
+boardinghouses
+connectivity
+check
+crazy
+collided
+assistant's
+critic
+bilateral
+cheapening
+appalled
+autopsy
+balled
+abnormally
+acquires
+aloofness
+backwaters
+combative
+computerizing
+craters
+contributorily
+behaved
+comers
+axiomatizations
+analogously
+banjo's
+cleanser
+capitalizes
+chamberlain
+aggregates
+amenorrhea
+begins
+condone
+cleaved
+bustard
+adsorb
+airedale
+bridles
+audited
+could
+amour
+checkbooks
+admiring
+arrested
+commerce
+asbestos
+can's
+clamping
+bathers
+acknowledgments
+census
+acrobat
+bargains
+apogee
+creaking
+busboy's
+additional
+chants
+circumvents
+afloat
+anyplace
+alumnae
+anions
+classroom's
+ballerina's
+convents
+angered
+climbers
+citation
+cools
+clamor
+capaciousness
+beatific
+abrades
+advocating
+coverings
+claims
+brethren
+advertised
+atrophies
+coffer
+beagle's
+brazenly
+bitterly
+clergyman
+braiding
+compressible
+convicting
+agreeableness
+antithesis
+cogently
+botanist's
+bidirectional
+bewilders
+airlock
+costumer
+blamelessness
+agglutinins
+catalyst's
+allocation
+annunciates
+borderings
+accomplishes
+confronters
+clinically
+breadbox's
+canvassed
+communicative
+coercing
+backpointer's
+bramble
+congregations
+crave
+courtesy's
+cocoon's
+admitting
+chieftains
+acclimate
+consequences
+cones
+contradict
+axolotls
+contractual
+artist
+atrociously
+consecutive
+berated
+bluing
+attacks
+choruses
+blatant
+balance
+amplifier
+assist
+analyst's
+ambler
+conveyance
+compromising
+baffler
+corridor
+bed's
+condoned
+boulevard
+anomie
+averages
+basics
+apologia
+cabbages
+concretes
+alcoholic
+aliased
+chocks
+balsam
+collies
+censor
+arouses
+conundrum's
+academically
+bent
+codings
+coastal
+allots
+acclaim
+citations
+cantor
+circularly
+boarder
+caribou
+biologist's
+cowling
+connects
+chasing
+bootstrap
+backscatter
+abstractly
+corrupt
+alleviating
+biasing
+abrade
+arraignment
+beaten
+blanketing
+compactness
+adage
+coincided
+borate
+bra's
+concepts
+bootleger
+christian
+argos
+basal
+abate
+campuses
+abridging
+confusers
+cabin's
+audition's
+amphibians
+attractively
+adhesive's
+ascendency
+beforehand
+ache
+brokers
+bowler
+criminally
+american's
+chock's
+artillerist
+appropriation
+characterization's
+artifices
+annoys
+constituents
+bottle
+beaned
+consisting
+beholding
+ceremony
+carpeted
+absolutely
+anorexia
+accredited
+azaleas
+amaze
+commit
+afflicting
+contriving
+adventure
+blood
+blabbing
+absoluteness
+appreciable
+approachers
+bumptious
+behavioristic
+anticipates
+adults
+barnyard's
+banging
+banana
+bilge's
+aware
+coheres
+bronchi
+commissioned
+arrogation
+confines
+core
+attenuation
+afterwards
+clearing
+applies
+alphabetized
+cemetery's
+campaigning
+abolishes
+brig
+cheer
+combers
+backtracker
+clinker
+clouds
+clog
+berries
+advising
+childish
+clobbered
+bride's
+astrophysics
+canker
+concatenate
+bite
+chagrin
+bodybuilders
+calamity
+admiralty
+councillors
+competitive
+assessments
+copper's
+cabling
+casket
+conducted
+backplane
+boyfriends
+bingo
+broader
+confiscates
+communicated
+baton
+cocktails
+albanians
+boardinghouse's
+brats
+akimbo
+categorizers
+comparator's
+blackbird's
+accidentally
+companion's
+clippings
+accosted
+bell's
+burly
+aggregations
+boathouses
+airmails
+abreactions
+changers
+carbon
+cleaners
+bookkeeping
+correlations
+backer
+conclusions
+brainstem's
+anecdotes
+chateau
+cogitating
+amphibious
+compounded
+completeness
+comptroller's
+boatswain's
+bolstered
+acquiescing
+actors
+calorie's
+adaptability
+abstractor
+bimolecular
+belly's
+automobile
+automotive
+analyticities
+awesome
+colonizer
+approximated
+chemist
+coronet's
+classmate
+anteater's
+altars
+adulthood
+amid
+assails
+blizzards
+corroborative
+biographer
+compartment
+blooded
+bipartisan
+bluff
+aloof
+bronchiole
+clincher
+congratulations
+ablation
+caught
+collier
+chooses
+antidotes
+artery
+clearance
+civility
+basketball
+auscultated
+behaviorally
+crowning
+autobiographical
+cheaply
+brutally
+agonizing
+clerk
+comprising
+baller
+confuses
+acquiesced
+astonishingly
+birthplace
+covered
+chopper
+combinator
+benignly
+bedside
+blasts
+billboard
+appraise
+aboveground
+comforter
+credulousness
+battlefield
+barefoot
+cleverness
+apparatus
+bartering
+bromine
+aerodynamic
+crabs
+chains
+airflow
+allegrettos
+armchairs
+blacklist
+approvals
+bait
+collections
+antecedent's
+airbags
+casted
+content
+conferrer's
+crouching
+coughs
+canal
+amphetamine
+augustly
+bedraggle
+arithmetic
+cataloger
+alluding
+credulity
+coffees
+crueler
+beautifully
+caresses
+correlative
+consul
+criticizing
+couched
+baths
+alchemy
+bargain
+accomplishments
+conveyer
+benevolence
+broil
+chilling
+axed
+attire
+collisions
+categorizes
+cited
+aeration
+accommodating
+coordinations
+boxcar
+cattle
+bullion
+afternoon's
+captures
+afghans
+comets
+component's
+ark
+bounds
+adjusting
+bravely
+capability
+chap
+absolving
+aspirating
+arcs
+conspires
+collaborated
+admonishment
+astounds
+brasses
+compromise
+changed
+consumers
+connoting
+buttonholes
+cordial
+anionic
+chastisers
+archive
+alleviate
+burglarize
+acquainted
+copiers
+cashers
+antisocial
+creations
+bookie's
+censure
+beadle's
+banded
+circled
+bulged
+cheapness
+attorney's
+chewer
+bookshelf's
+councillor
+assertion
+broom's
+contemplations
+club's
+balkans
+cherubs
+alas
+chair
+apologizes
+compartments
+beyond
+aptly
+censured
+allegros
+boosts
+card
+arithmetizes
+attainment's
+arrester
+anding
+asker
+compatibilities
+confidentially
+commissioning
+cleaner
+aversion
+cooperative
+battalion's
+cemented
+charity's
+conceited
+capable
+anymore
+computing
+aping
+chiefly
+affair
+beaners
+allying
+caption's
+antipathy
+causal
+abyss
+botchers
+burglarizing
+confidant's
+activator
+continent's
+census's
+brat's
+antagonism
+bedspring's
+antiserum
+charge
+connector's
+alike
+believable
+belfry
+cast's
+bureaus
+beneficiary
+abolisher
+artichoke's
+broadly
+concurrent
+alteration
+bookies
+crafts
+bays
+ass
+bouquet's
+ave
+chords
+crazes
+anemic
+appoint
+beets
+billing
+contest
+assassination
+allot
+brindled
+acute
+absolves
+adsorbed
+auxiliaries
+belatedly
+businesslike
+assassinates
+bookkeepers
+bevel
+adders
+automate
+archangels
+breakfasted
+changeability
+contested
+cradles
+combatants
+besieging
+certainty
+attempts
+bankrupting
+compiler's
+complications
+banquets
+ancestor's
+ail
+abbreviating
+compacter
+approvers
+acknowledges
+comically
+almonds
+counsellors
+calmness
+assailed
+crane's
+baser
+big
+corruption
+circuitry
+briefness
+community's
+banquetings
+alms
+bass's
+bellowing
+adoption's
+blockading
+compellingly
+builders
+befallen
+bombproof
+cartons
+chore
+crimson
+anther
+clucks
+assemblies
+beatitudes
+aspiration
+compels
+angst
+balancing
+bowstrings
+bayonet's
+butte
+biomedical
+casualness
+accolade
+blackberry's
+bunched
+affright
+clung
+burlesque
+bare
+corrected
+arbitrate
+cropping
+coherently
+bloodhound
+circularity
+courtesies
+articulating
+concluded
+analogy
+brutalized
+airmail
+cooperator
+cousins
+centralization
+bibbing
+beside
+bravo
+abductors
+cars
+bovines
+bump
+absconding
+chins
+chasers
+boundary's
+antecedents
+awed
+counselled
+aback
+attenuator's
+blazer
+bettered
+awaken
+abreast
+beagles
+artisans
+buckled
+credence
+control's
+bewhiskered
+calloused
+breathe
+collaring
+blossoms
+bring
+actualities
+bivalves
+animals
+cowboys
+constituency
+affordable
+acrobatic
+attiring
+boatswain
+concurrence
+abrasions
+babel's
+cowerers
+chiffon
+bostonian
+criterion
+blinds
+cased
+affections
+conditioners
+clutter
+accrued
+attractors
+botcher
+compunction
+bludgeoned
+censored
+allah's
+chronic
+burrs
+commodity's
+appraiser
+asserters
+cheaters
+besting
+anchorite
+combine
+afforded
+cigarette's
+bathrooms
+apostles
+chloroplast
+bootlegging
+bibliographical
+beans
+bylaw
+benefited
+brochure's
+cordially
+brashly
+beastly
+bologna
+alderman's
+burning
+billow
+convert
+buffaloes
+comparatives
+assistances
+camouflaged
+announcement
+bobwhite
+brawl
+adducted
+cavern's
+affectation's
+bandying
+brunette
+architect's
+aphorisms
+cremate
+bray
+billed
+conception
+battlefield's
+bandaged
+broaches
+bazaar's
+beatification
+bigotry
+clergy
+abstains
+befits
+bantering
+conceivable
+attachers
+analogies
+bimonthly
+august
+additionally
+confirmation's
+ballooning
+cardboard
+belle's
+counterparts
+candor
+bishop
+comprehension
+affronted
+bravura
+courting
+antidote
+buggies
+arisings
+appendix's
+bright
+categorize
+cooking
+agnostic's
+billets
+amok
+bewitching
+audiograms
+column's
+bussed
+checkbook
+alteration's
+atherosclerosis
+broached
+based
+cacti
+boardinghouse
+bowdlerized
+anchoritism
+achievement's
+bald
+cover
+codifications
+capacitor
+brashness
+causes
+acyclically
+argument
+boarders
+audiometer
+compute
+contribute
+crisply
+bitters
+circumvent
+assailant
+bosun
+buyers
+alibis
+blurting
+coasts
+bivouacs
+arrogating
+albanian
+attempted
+acquisitiveness
+applauding
+alfalfa
+cantors
+canonicalizes
+alkaloid
+bruising
+associativity
+budgetary
+carbolic
+clashing
+buffalo
+acorn
+analyzing
+backyards
+comedian
+betwixt
+aces
+chartered
+additivity
+becalm
+combat
+characterizations
+clinics
+bulbs
+bloc
+amenable
+civilian's
+breech
+attainment
+bounding
+compiler
+cotyledons
+billboard's
+caper
+aphasia
+chester
+combats
+biddable
+articulates
+caps
+assignees
+bifocals
+beady
+chinese
+assertions
+allegation
+championships
+accrue
+containment's
+croaking
+classifying
+annum
+brightened
+bits
+appointer
+besieger
+citizen's
+cerebral
+canto
+bakers
+capitol's
+authorizer
+blockaded
+anodizes
+alarmed
+buttressing
+attenuates
+bumptiously
+chronological
+colleges
+coward
+contraption's
+abstractions
+controversial
+boric
+bids
+agents
+backpointer
+bumped
+bottoms
+bowlines
+captivated
+article
+cliche's
+chases
+choker
+bremsstrahlung
+consult
+adjudged
+auctioneer's
+covers
+accurateness
+clues
+bugler
+bareness
+cedar
+alleviation
+anesthetically
+backpointers
+arched
+administered
+arrowhead
+continues
+asks
+confessor's
+allure
+backlogs
+childishness
+appointive
+covering
+conscience's
+bellows
+blanked
+considerations
+appalachian
+aerate
+budged
+city's
+accordion
+cliche
+collectors
+comprehensive
+boomed
+chariot
+baffling
+bunkmate's
+bumbles
+contaminating
+corroborating
+applications
+bursting
+cabbage
+befalling
+acquittal
+compromisers
+components
+arpeggio
+brothel's
+credibility
+begrudge
+confirmation
+academy
+appertains
+calibrates
+bureaucrats
+bawl
+costuming
+biography
+adoration
+cloaks
+aggregating
+business
+aphorism's
+carters
+admixture
+coexistence
+anomalously
+adapts
+amide
+affiliation
+capillary
+biscuit
+brainy
+bellhops
+chartings
+cohered
+austria
+champions
+basin's
+cascading
+consultants
+bison's
+admixed
+arithmetically
+clothed
+betterments
+conspirator's
+addition
+adolescence
+bolsheviks
+abominable
+breathless
+cozy
+arouse
+bumble
+about
+apace
+astronaut
+asteroid
+cable
+crab's
+beachhead
+assets
+analyses
+bisection
+coconuts
+alleys
+armament's
+bloodstains
+arpeggios
+apologist
+blithely
+anabaptist's
+beadle
+channelled
+confuse
+annoy
+beautifiers
+cheats
+clenches
+amuse
+bewail
+constitutional
+birth
+appendixes
+amazed
+berry's
+bilingual
+blustery
+amplification
+clogged
+blackmailing
+breakables
+adduct
+bondsmen
+conferred
+codewords
+bequeathal
+abundantly
+banner's
+atrocity
+congested
+closely
+absolution
+concatenations
+anarchic
+crag's
+communicators
+cavities
+comptrollers
+backstage
+bewailing
+charcoal
+conveyances
+collar
+bores
+briefest
+comments
+awning's
+associator's
+antarctica
+correspondingly
+bidden
+ad
+clings
+bit's
+apollo
+bulldogs
+chateau's
+amounting
+cogitates
+bellhop
+bookish
+bout's
+cannister
+bicep's
+asses
+beef
+battlefields
+consort
+auspicious
+breezy
+buried
+beverages
+approximates
+conduction
+bleakly
+blanketers
+ascertained
+absentminded
+bolivia
+births
+behave
+bilk
+breaths
+charter
+abstaining
+appareled
+boulder's
+breadwinner's
+correct
+accessed
+befitted
+adulterer
+axe
+activation
+betrothed
+asymptote
+bullet's
+clusterings
+baud
+bustling
+ballplayer
+constraining
+cleared
+brown
+affirmed
+agencies
+churches
+backyard
+burntness
+bronchioles
+charmers
+backscattered
+abridgment
+claw
+blow
+adjourning
+constantly
+brightens
+autobiography
+cards
+bypassing
+alcibiades
+concurrency
+chuckles
+bests
+belligerents
+adjustments
+bolshevik
+cabins
+astronomically
+cartridge
+boxcars
+boned
+bottomed
+burgeoned
+adjourned
+apprenticeship
+chastiser
+breached
+boycott
+butchered
+coordinating
+cottage
+brainwashing
+confinement
+bandies
+absentee
+collapses
+cruel
+along
+alloy
+convoying
+assignment's
+crisp
+ambidextrously
+blindfolded
+chilly
+condenses
+avers
+broiler
+anesthetics
+beaker
+cholera
+brag
+coffins
+cranked
+allocator
+brutality
+acquire
+blushing
+briar
+abolish
+crossovers
+broiling
+consolers
+beatify
+almanac's
+cooled
+commencements
+clasp
+committing
+condemnations
+altar
+by
+bombastic
+confederates
+bong
+concerted
+compilers
+counterproductive
+brig's
+accurate
+avidity
+cleavage
+blame
+conceive
+assessor
+consolingly
+concise
+computes
+alliance
+clucked
+axon's
+annunciating
+baseball's
+allusion
+brays
+auras
+blond's
+bronchitis
+ciphers
+blowing
+broth
+canonically
+baseness
+byline's
+appetite's
+colonists
+condensed
+cawing
+beaning
+broadening
+colonist's
+apocrypha
+chauffeured
+cored
+branding
+carrier
+assessed
+collegiate
+chirped
+accounted
+clubbed
+antibodies
+behalf
+alphabetizing
+conqueror
+alpine
+budgeters
+casements
+appropriate
+compliments
+cast
+accountancy
+cathedral
+conserve
+accorders
+arbitrarily
+cowing
+bars
+bagel's
+climax
+attention's
+cautioning
+centipede's
+almost
+abstractionist
+carpenter
+containing
+arab's
+courtesy
+carton
+accelerated
+bowman
+boastings
+banal
+bucking
+accomplishment's
+classification
+baldly
+abruptness
+calibrations
+blocs
+biking
+assenter
+adversities
+compartmentalized
+chemical
+attic
+audiogram's
+applauds
+crests
+bad
+bounce
+accelerators
+contemptuous
+attentions
+cancellation
+battles
+aging
+advantages
+answers
+bruised
+castes
+anthologies
+any
+coped
+arcade's
+adaptively
+arsenal's
+confessed
+controllability
+acceptor's
+abrogated
+abutted
+amusingly
+apology
+broils
+court
+adiabatic
+ambitions
+charged
+awfulness
+consorts
+boundaries
+bode
+collie
+botanists
+blurring
+absents
+batten
+backwoods
+breaks
+certified
+chattering
+admitted
+bathrobe's
+analogous
+corporacy
+bijection's
+combatant
+checked
+condition
+amoral
+bayed
+bedroom
+chanting
+antics
+charity
+blip's
+biped
+brilliance
+catchers
+booted
+anabaptist
+clothe
+comforted
+complaints
+coacher
+admissible
+bang
+concisely
+cookery
+capita
+assurance
+codifying
+benchmarks
+aunts
+commentaries
+anon
+applicators
+constructor
+associated
+abuses
+choicest
+confiding
+antislavery
+apron
+ashore
+cheerfully
+betterment
+administration's
+campaign
+cremated
+ambulatory
+bleacher
+afterthought
+barkers
+choir
+crossly
+conducive
+cache's
+battery
+actinium
+countryman
+cajoled
+appeasing
+beamer
+cleaves
+anthem's
+clearing's
+cooperated
+barker
+crowing
+apprising
+accusation's
+beginning
+associator
+booking
+caved
+amicable
+codify
+clairvoyant
+bevels
+becalms
+brawn
+bunkhouse's
+arms
+antiredeposition
+belt
+antiphonal
+cried
+brae's
+bridal
+acronym
+clay's
+checkers
+auxiliary
+bind
+compares
+agilely
+askers
+blankly
+antagonist's
+bimodal
+captivation
+creditable
+concentration
+amateur
+adjure
+ascertaining
+budge
+adulterate
+additive's
+cardiac
+born
+brewed
+borneo
+bun's
+blue
+cackled
+acclimates
+airline
+blinder
+brokerage
+communicant
+central
+aggrieved
+asynchronous
+bough's
+acidly
+archaeology
+complementary
+animator's
+bodyguards
+climbs
+apathy
+constellation's
+acculturate
+archaeologists
+contingents
+calling
+bartender's
+autopsied
+correspondent's
+carnivals
+abjure
+bystander's
+bungle
+chanticleers
+conceding
+burghers
+boards
+accessions
+compensations
+arabian
+churn
+crowed
+centering
+abnormalities
+courtier's
+congregation
+aberrant
+annexing
+blockhouse
+anthropomorphic
+bedder's
+abutting
+conundrums
+affiliated
+cancellation's
+bolts
+ballgowns
+augmenting
+bureaucracy's
+bootlegged
+audiometers
+blueberry
+affliction
+appreciation
+codifier
+amasses
+countering
+crackle
+canoe
+consuls
+breathes
+broiled
+amalgam's
+bodes
+ballooners
+coating
+corollaries
+amphibology
+agenda's
+chafing
+alcoholics
+accredit
+anisotropy
+anchovies
+carriers
+acceptors
+betrayed
+buttocks
+busy
+bunny
+cropper
+accreditations
+bumblebee's
+adhesives
+civilize
+accedes
+abroad
+arch
+crept
+cotyledon
+alphabetic
+braille
+control
+anglophilia
+billings
+corporate
+athlete
+accusing
+appear
+announcing
+accordions
+computerize
+combinations
+bile
+abut
+charger
+columnize
+computer
+blacks
+converges
+blamer
+bulked
+convincingly
+checker
+correspondence's
+accelerate
+accessible
+conceivably
+abscissa's
+adsorbs
+anglophobia
+anomic
+casters
+churning
+crease
+brood
+appendage
+bulwark
+bombers
+arcaded
+breadboard's
+aphrodite
+color
+commodore's
+answerer
+bobolink
+cloth
+conversion
+clime
+artery's
+birthplaces
+compiled
+arrack
+beetles
+bobs
+compatibility
+cocoon
+counterpart
+audible
+colonies
+airport's
+beige
+cogent
+bromide
+begrudging
+acids
+crucifies
+beggary
+archipelagoes
+availably
+counterfeiter
+blanketed
+amending
+accelerometer's
+advisors
+byway
+alignment
+amber
+austin
+copyrights
+beaus
+brigantine
+comforts
+appointment's
+crawler
+bangles
+contemplation
+concur
+characterizing
+censoring
+charters
+catalogues
+appropriately
+builds
+aeronautic
+confused
+comber
+axially
+cackler
+coercive
+ambassador
+arcades
+brash
+amorality
+belittling
+battling
+bloodied
+acrylic
+bantered
+clasped
+carcass
+archangel
+annunciators
+aristotle
+boulder
+burglarproofs
+chooser
+abilities
+calmest
+bach
+always
+blaspheming
+crossover
+bakeries
+clocks
+ankle's
+accidental
+arbitration
+chirp
+aeronautical
+boy's
+acidic
+bowline
+anonymously
+cod
+couplers
+beautifications
+bluffing
+backarrows
+brow
+covenant
+acronym's
+banning
+albeit
+ascetic
+burn
+animator
+beatnik's
+coveted
+cipher's
+broke
+cap
+bellman
+bulldozed
+clarifies
+bathes
+blip
+availabilities
+booth
+clangs
+audiences
+cathedrals
+confounding
+bigot's
+beecher
+arts
+company
+attributed
+avenged
+bawling
+caustics
+alee
+bordello's
+banks
+affords
+complied
+commas
+collaborate
+aquatic
+ambitiously
+burro's
+beard
+bittersweet
+candlestick
+bylaws
+broadcastings
+believe
+barrels
+braying
+certifications
+contrasts
+crashes
+audition
+confine
+bucks
+abates
+bureaucracy
+ambles
+besiege
+broccoli
+antibiotics
+attenuators
+accelerometer
+caste
+bib's
+browbeaten
+appurtenance
+bauxite
+asceticism
+case
+chewing
+aerator
+achievements
+barricade's
+agglutinates
+bewildering
+cartridge's
+children
+bufferrer
+actuator
+converging
+bolted
+chat
+combs
+chemist's
+adduced
+algebraic
+circular
+bloated
+conclusion
+burgess
+certifies
+absconds
+comprise
+benzedrine
+bumbler
+banjo
+allow
+appealing
+cooperation
+abraded
+chaperoned
+biracial
+braced
+censurer
+acoustician
+appraised
+benefitting
+constructs
+convertible
+administrative
+asocial
+area
+creature
+besetting
+crater
+begrudgingly
+blanket
+ablest
+alba
+airplanes
+allowing
+briefly
+beneficences
+concurring
+adjective's
+cork
+aerospace
+anomalies
+asher
+auger's
+boilers
+abhorring
+broadenings
+bladder
+belay
+approver
+abdominal
+commends
+cringing
+billiards
+beater
+auspice
+contrasters
+bights
+absentees
+atoll
+cooler
+activator's
+basement
+burgeon
+allusiveness
+codeword's
+bandage
+contemplate
+adopted
+coping
+carving
+baptism
+colds
+altos
+background
+closet
+commuted
+acre's
+aliens
+council
+cans
+cheese
+ally
+aseptic
+belgian's
+crossbar
+addressed
+commons
+call
+careers
+breakfasting
+brazilian
+catholics
+bachelors
+consultant
+brighter
+crossword's
+burglar
+avoidable
+batting
+cigar
+amps
+axiological
+combed
+comforters
+albumin
+cookies
+booming
+archaize
+canton's
+bunkmate
+combination
+bondsman
+anxiously
+affixed
+associatively
+cigar's
+backstitch
+calls
+captivates
+commodities
+atmosphere's
+asserting
+beaver
+beatnik
+container
+activists
+consoler
+commoner
+buttonhole's
+abhorred
+aggregate
+cliff
+antidisestablishmentarianism
+broach
+ambling
+comer
+bited
+advocated
+behaves
+bosom
+continents
+conserves
+bashful
+ago
+backarrow
+circumventable
+avocados
+briar's
+annuls
+barnstorming
+aired
+carry
+crossbar's
+aspire
+beards
+abides
+cliques
+completes
+brassiere
+absorbs
+annul
+chairman
+baron
+battens
+africans
+abatement
+colonization
+carries
+borough
+allurement
+breakfasters
+alkali
+acoustically
+corners
+capturer
+casualties
+asphyxia
+animized
+administrator
+belying
+basketballs
+bylines
+bandit
+autopsies
+braining
+contradiction's
+antic
+butted
+bacillus
+blurt
+conditioned
+backers
+agreeable
+almanacs
+cider
+chicken
+chambers
+clutch
+assailant's
+conveyers
+amazers
+beribboned
+breeder
+caveat's
+buffers
+combination's
+ampersand's
+crafting
+clanged
+caving
+aspirant
+butlers
+adjective
+auckland
+announced
+creators
+caches
+baseline
+codifies
+baptism's
+coarsened
+cohesion
+airman
+avenge
+backaches
+budgeted
+armpit
+bicycled
+converged
+besmirched
+autonomic
+coming
+assemblage's
+chained
+admissions
+alcoholic's
+branches
+bunk
+anciently
+bloods
+adventurers
+amazes
+coloring
+abstractors
+adaptation's
+boar
+amulet
+agglutination
+conquerable
+booker
+confronts
+barometer's
+bedbugs
+barricades
+cheap
+bewitch
+circus
+backward
+archeology
+automobiles
+bending
+amino
+beckoning
+admits
+berliners
+borer
+clambering
+atomizing
+banner
+blissfully
+catchable
+breakdown
+abjured
+computerized
+chaplain's
+amphitheater
+ballot's
+craziness
+croaks
+counties
+adopting
+breast
+airstrip's
+basin
+contemplating
+commitments
+critique
+appears
+bellies
+baccalaureate
+abducted
+blackened
+animosity
+appraising
+antiquity
+assistants
+asthma
+bootstrapping
+bounties
+agleam
+advertisements
+benches
+artful
+broadens
+chuck's
+betrayal
+blasphemed
+brooms
+castled
+coroutine
+conscious
+beetle
+banshee
+advertising
+baring
+awakens
+balm
+billions
+compromisingly
+ballroom's
+burrower
+bayou's
+ambiance
+beheading
+bought
+adagios
+adornment's
+anointed
+abolishment's
+anesthetizes
+badly
+boyishness
+consultant's
+cheek
+cannibals
+breakdowns
+assured
+agates
+bicker
+appliances
+cafe
+bagpipes
+adrenal
+combinatorially
+belligerence
+bricked
+adjacency
+aimless
+crook
+cherry's
+assessing
+brushfire
+cormorant
+captained
+blundered
+conceptually
+congress's
+contraster
+ambushes
+bronze
+autotransformer
+corded
+brisker
+contently
+announcements
+bullet
+apportionments
+columnized
+canon
+conservation
+algaecide
+blackening
+compassion
+beaks
+constructibility
+chapter
+abscond
+costly
+bacon
+coldest
+aptness
+billionth
+altercation
+approbation
+alternator's
+criticizes
+befell
+canopy
+buoyant
+brazil
+anticipate
+absenteeism
+champion
+aesthetics
+cadence
+betroth
+confidants
+bean
+braid
+aphids
+cluttering
+cantankerously
+bloom
+barbarity
+clawing
+bogged
+agreed
+asia
+abrasion
+corporals
+baselines
+box
+chartering
+apotheosis
+ampersands
+conceit
+creamer
+adhered
+circuit
+carpet
+accompaniments
+boomerangs
+blindness
+chipmunks
+bewitched
+allocate
+bicycle
+compacted
+cab
+calcium
+cellists
+apex
+borrows
+completed
+brightly
+constables
+ascertains
+conspiracy's
+badgers
+bunion's
+anabaptists
+broadband
+clefts
+accepted
+benched
+catalogued
+cadenced
+alliteration
+acquiesces
+boxcar's
+athlete's
+bracing
+cremations
+analysis
+crossings
+assorts
+apologize
+brazier
+configurable
+basking
+craves
+belle
+conversation's
+belligerent
+anesthetize
+brewers
+cackles
+adventures
+airlock's
+booklet's
+apply
+anecdotal
+bewails
+computer's
+autographs
+acclimated
+coefficients
+avidly
+beckoned
+broadener
+bulk
+blacklisting
+belly
+acquit
+convoy
+achiever
+aversions
+advisor's
+captor's
+camel's
+asset's
+advantageous
+basement's
+confident
+crescents
+compiling
+butler's
+cartoon's
+adaptive
+chlorine
+abets
+cruelly
+amiable
+baleful
+ceiling's
+adumbrated
+cherry
+aspirant's
+cashing
+candidly
+chaff
+bitter
+brim
+alcove
+bulb's
+carbonizers
+citizen
+attic's
+breed
+consumer
+conferrers
+accommodations
+contrapositive
+beget
+brilliantly
+attentionality
+continuation's
+bosses
+brave
+configurations
+benediction's
+conferring
+accessor's
+bobolinks
+bulled
+cleanness
+algorithm
+advancements
+altogether
+accumulations
+albacore
+bowing
+belching
+apical
+consequentiality
+bagpipe's
+ambrosial
+bullying
+cleans
+attendance's
+complimenter
+blink
+cager
+assembling
+coat
+allowable
+astringent
+antiresonator
+cardinal
+clicks
+commentator's
+blossom
+categorizing
+amphibian's
+commonality
+consonant
+classics
+affable
+accorded
+aimlessly
+archetype
+administerings
+boldness
+anatomy
+apprehensively
+absence's
+actuality
+attempting
+categorical
+checkpoints
+allemande
+corer
+behoove
+bleaches
+bough
+blended
+blotting
+baptists
+courtship
+benevolent
+bumptiousness
+chum
+anguished
+auto
+career
+bookstore's
+carbonized
+autocratically
+cherishes
+attendees
+contends
+anastomotic
+attributing
+abbot
+came
+blunt
+battlement's
+affection
+coordination
+annotate
+besets
+bucked
+boasting
+benedictions
+adherent
+blimp's
+acknowledging
+cleverly
+applejack
+annexation
+bat's
+cantons
+beetled
+closed
+country
+creatively
+bakery
+blasphemously
+chalking
+bold
+attended
+crasher
+backtrackers
+artist's
+bracelet's
+allowably
+affiliating
+arrant
+brayed
+barbells
+consigned
+abolishers
+climatic
+atrophying
+amigo
+arsenal
+ascribes
+converses
+aura's
+allotted
+bliss
+classical
+bigger
+ahead
+chopped
+blade
+casualty
+acceded
+bottling
+axon
+casement's
+battlefront's
+convinces
+alerting
+advertisers
+blemish
+agglutinating
+commonplaces
+autocorrelation
+armistice
+crediting
+besmirch
+amplify
+auscultation
+befalls
+called
+alnico
+arbiter's
+abort
+argonauts
+cessations
+cribs
+blare
+aforementioned
+condemners
+contaminated
+complained
+bootstrapped
+criticism
+cooperatively
+binding
+bullies
+basins
+contrived
+assort
+adulterously
+booms
+abandons
+also
+appealed
+count
+contributed
+beet
+crashers
+carryovers
+clays
+blackness
+cosmetics
+awkward
+blurted
+bothers
+analyzer
+backups
+alarming
+bicyclers
+credit
+abrogate
+audience's
+architecturally
+alibi's
+complicator's
+chuckle
+corporately
+banishment
+communist's
+birdie
+asymptotic
+break
+braze
+benzene
+bridgework's
+beak
+agitators
+collateral
+arranges
+bayonet
+breathlessly
+counsellor
+creates
+convulsions
+backdrops
+applicants
+altercation's
+commission
+breathtakingly
+corresponds
+backdrop
+armaments
+build
+biannual
+buttoning
+computational
+chaired
+bather
+critically
+amanuensis
+bantus
+confidential
+annoyance's
+carder
+authorizing
+acquits
+bipeds
+cocktail
+cinnamon
+burros
+brocade
+abdomen's
+creative
+acquisition's
+abdomen
+baited
+aristocratically
+alive
+committed
+arrestor
+cleaving
+comedy's
+baggage
+bra
+adaptors
+afoot
+bulls
+contoured
+amalgam
+comprehensibility
+amortizes
+biographical
+confront
+covert
+cravat
+animates
+booksellers
+bypass
+bootleggers
+bedfast
+affair's
+buzzer
+bellowed
+aligning
+bystander
+acclimatized
+accomplishing
+against
+blankness
+adopt
+addressing
+croaked
+boaters
+behooves
+audits
+boatyard
+cruise
+agnostics
+ailing
+anchorage's
+adaptations
+conceptualize
+advised
+cries
+bank
+actuators
+brazing
+catalyst
+beachheads
+aplomb
+compressed
+amputated
+contractor's
+bedspreads
+bowed
+coon
+chaplain
+cannons
+coffers
+assembly
+bouffant
+converters
+ampoule's
+borderland
+archaeologist
+blankets
+conserving
+avalanche
+assortment's
+aspic
+axle
+bereaves
+allowance
+carbonization
+bartender
+clawed
+coincidental
+appeared
+chipmunk's
+countable
+authenticators
+bestow
+alps
+caw
+aniseikonic
+avows
+blackmails
+controlling
+correlating
+audiologist's
+bit
+approving
+collapse
+coon's
+cleave
+atheists
+brigade
+autopilots
+bounteous
+commercialness
+accede
+cavalierness
+accustoming
+burnishing
+clobber
+aspirates
+brochures
+cellar's
+communes
+berkelium
+chickadee
+cobweb
+circumstances
+chose
+comprehend
+baritone's
+aggravation
+adopts
+cruelty
+and
+axer
+cautioned
+carbonic
+babbles
+bet
+charitable
+computable
+cardinality
+amenities
+confiscating
+catcher
+audaciousness
+complaint's
+cooperator's
+buddies
+baking
+constant
+classmate's
+accentuate
+choices
+crop's
+authorization's
+comedy
+brushy
+brotherly
+canals
+ads
+causeway
+abrading
+cemetery
+autocrat
+briefing
+abdomens
+apparition's
+consummately
+alkaloids
+bulkheads
+cravats
+bales
+campaigners
+bagpipe
+accentuates
+arm
+barometric
+bas
+agitator
+behavior
+abutters
+blockades
+alertness
+civilizes
+chinner
+anthropologist
+artificialness
+balkanize
+automates
+cackling
+anarchists
+amounted
+cereal's
+anodized
+cobblers
+acknowledgment's
+blear
+copper
+alphabetics
+blackboards
+apish
+answering
+afternoon
+arbors
+accused
+chickens
+agency's
+contractors
+contraptions
+cosmology
+anomaly
+bandstand
+attempter
+account
+challengers
+admiration
+calculations
+autocracy
+analyticity
+accord
+buildup's
+commonly
+babbling
+adjudication's
+attain
+ameliorating
+candlestick's
+chronicles
+align
+consensus
+agate
+adulation
+aspirated
+conclusive
+biologists
+cracks
+conform
+chambered
+beryllium
+connote
+amusing
+aquifer
+ankle
+batteries
+conservationists
+accountants
+apiaries
+actinometer
+beckon
+clearances
+clouded
+antitoxin's
+consolation's
+collectives
+boxtops
+bombarded
+bombarding
+bluest
+allusion's
+construction
+ballpark's
+codified
+coincidence
+celebration
+chip
+beginner's
+algerian
+boo
+athletics
+condenser
+bytes
+beauties
+concerts
+conductors
+awl's
+agitations
+buttered
+codifier's
+armory
+ascii
+aspirin
+arthritis
+bylaw's
+conformity
+blasting
+coinciding
+aphid's
+ceremonial
+banisters
+bristle
+bid's
+buckboard's
+bandied
+biopsy
+ballrooms
+chloroplasts
+bidding
+boil
+algebra
+constellation
+chuck
+cringes
+cleanliness
+apron's
+cosmopolitan
+bashes
+abusive
+believer
+conductor
+butters
+breweries
+allotment
+artfulness
+bunkmates
+blares
+connections
+anticipated
+classifies
+commandments
+beginnings
+bend
+brambles
+blacked
+basketball's
+affectionate
+cocoa
+anacondas
+busing
+bone
+birchen
+creamed
+aged
+commemorates
+brother
+aberration
+crawl
+actuarial
+apology's
+alumnus
+adversary's
+anaphoric
+aspiring
+consciousness
+cokes
+assignee
+boxing
+blanched
+camels
+contemporaries
+carnivorous
+assigned
+apologetically
+corpus
+accusations
+beefing
+champaign
+claps
+adherence
+aloft
+complication
+citizenship
+becomes
+compound
+arabesque
+bronchiole's
+appraises
+breach
+collection's
+botched
+bitches
+biblically
+bronchial
+amalgamating
+commoner's
+barbarian's
+arrange
+cradle
+conformed
+complimentary
+anodes
+cowering
+anoint
+brocaded
+bedazzling
+avionics
+burnishes
+bulkhead
+chink
+consciously
+contract
+clinch
+applicant's
+awning
+aloud
+chandelier's
+cathode's
+babble
+arachnid
+biplane
+clamorous
+assuredly
+consented
+axing
+avenger
+commence
+braving
+brandishing
+careless
+burningly
+boatsman
+channelling
+clarifying
+beggar
+berates
+cite
+cowered
+buffer
+condescending
+admixes
+bettering
+bedazzlement
+cord
+burglary's
+characteristics
+aptitudes
+adieu
+agree
+bends
+ceremonies
+accustom
+accessibly
+commanders
+ask
+cavalier
+brayer
+affront
+courser
+becoming
+carves
+configures
+beasts
+biters
+conditionals
+bodybuilding
+accretions
+chapter's
+cleverer
+corning
+brat
+classes
+almsman
+consumptive
+antique
+comprised
+beholders
+anthropologically
+buns
+bridge
+accretion
+acceptance's
+confederacy
+armorer
+argumentative
+crossword
+cowslip's
+analog
+counselor
+chastised
+barters
+clerked
+americas
+cloud
+aide
+alternators
+admitters
+bagatelle
+bridges
+civilizations
+anion's
+briton's
+apartment
+acquaints
+consummation
+chord
+coated
+barer
+carnivorously
+cheering
+allergy
+capacity
+classrooms
+assistantships
+complimented
+amphibiously
+commandment's
+audiogram
+corked
+badness
+bewildered
+assemblage
+backplane's
+asterisk's
+blob
+coexisting
+approximations
+counteractive
+barns
+adherer
+aborigine's
+brooding
+conceived
+adjustor
+cabled
+belongings
+breadwinner
+blot's
+brightness
+consigning
+barflies
+bisector's
+basing
+complement
+conditioner
+brazes
+crank
+antinomian
+crowd
+accelerometers
+befitting
+backlash
+bastions
+acceleration
+briefcases
+correlated
+baffle
+chew
+accosts
+agreeably
+bassinets
+cogitate
+concerning
+contouring
+broadside
+compact
+brainstems
+atom's
+bondage
+biter
+archdioceses
+basis
+bellboy
+blobs
+barons
+clods
+campaigned
+assessors
+bubbles
+annal
+casual
+altercations
+clog's
+biased
+arianism
+ancillary
+collaborator
+butter
+bureau
+blending
+antiquities
+brands
+activism
+crews
+beats
+broad
+buds
+baggers
+cobbler's
+condemns
+cabinets
+bomber
+blinders
+center
+contacted
+bewilderingly
+circulates
+burnings
+achieved
+belch
+barbecue
+angles
+comparative
+befuddle
+cherished
+chapters
+chanter
+allegation's
+armstrong
+converter
+combinatoric
+angrier
+brooks
+clinked
+blubber
+appointments
+compactor
+cleaned
+car
+contention's
+artificial
+cramp
+consistency
+aborting
+collaboration
+awarders
+crippled
+anaphora
+creamy
+buoyed
+baptistery
+altered
+anchoring
+alterer
+adjuring
+beacon's
+commencement's
+ascension
+candidness
+clouding
+cigars
+boiled
+christmas
+contingency's
+alum
+apparel
+contributors
+anisotropic
+annotations
+bushwhacks
+brides
+continuities
+carton's
+blurred
+antibody
+aorta
+blankest
+combinator's
+banish
+breaches
+accumulates
+bowling
+braver
+antibacterial
+cooperators
+banked
+compensated
+chartable
+conjunctively
+antelope's
+bluefish
+annoying
+composed
+barges
+biconcave
+australia
+ballparks
+bearers
+acknowledged
+advocates
+crossed
+competitor
+blaming
+andorra
+baritone
+collaborator's
+accessibility
+complains
+commentator
+bibliography
+conference's
+atmosphere
+agrees
+bedstead's
+ardor
+character's
+conventionally
+arena's
+chokes
+channel
+bludgeon
+convoys
+condense
+beautifier
+ailerons
+compacts
+black
+bell
+completions
+ballroom
+besotting
+conservatives
+adventured
+bulldog's
+conversely
+arroyos
+compositional
+alternative
+association
+broods
+beefy
+consolidated
+balms
+acquaint
+animal
+certificate
+combustion
+aims
+cracker
+abetted
+cautionings
+bread
+attains
+agriculturally
+courtyards
+bawls
+country's
+creator's
+checkbook's
+cliches
+colonizing
+biennial
+aqueous
+craftsman
+contrivances
+algorithmic
+crate
+barefooted
+bodily
+anthropologist's
+but
+climate's
+campers
+crackled
+awakes
+conveyed
+borrowers
+approached
+avoids
+crib
+albania
+bathrobe
+admonitions
+architectures
+consenting
+anastomosis
+blob's
+actual
+arrowhead's
+accountable
+allegiances
+commendation
+appearers
+comply
+concurs
+controversy
+abstracting
+artifact
diff --git a/db-4.8.30/test/wrap.tcl b/db-4.8.30/test/wrap.tcl
new file mode 100644
index 0000000..1d341c7
--- /dev/null
+++ b/db-4.8.30/test/wrap.tcl
@@ -0,0 +1,99 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# Sentinel-file wrapper for multi-process tests. This is designed to avoid a
+# set of nasty bugs, primarily on Windows, where pid reuse causes watch_procs
+# to sit around waiting for an unrelated process that is not part of DB's
+# tests and never exits.
+
+source ./include.tcl
+source $test_path/testutils.tcl
+
+# Arguments: script logfile [SKIP] [script arguments ...]
+if { $argc < 2 } {
+ puts "FAIL: wrap.tcl: Usage: wrap.tcl script log [scriptargs]"
+ exit
+}
+
+set script [lindex $argv 0]
+set logfile [lindex $argv 1]
+if { $argc >= 2 } {
+ set skip [lindex $argv 2]
+ set args [lrange $argv 3 end]
+} else {
+ set skip ""
+ set args ""
+}
+#
+# If the optional SKIP keyword was not given, the script's extra arguments
+# start at argv[2] rather than argv[3], so recompute args accordingly.
+#
+if { $skip != "SKIP" && $argc >= 2 } {
+ set args [lrange $argv 2 end]
+}
+
+# Create a sentinel file to mark our creation and signal that watch_procs
+# should look for us.
+set parentpid [pid]
+set parentsentinel $testdir/begin.$parentpid
+set f [open $parentsentinel w]
+close $f
+
+# Create a Tcl subprocess that will actually run the test.
+set t [open "|$tclsh_path >& $logfile" w]
+
+# Create a sentinel for the subprocess.
+set childpid [pid $t]
+puts "Script watcher process $parentpid launching $script process $childpid."
+set childsentinel $testdir/begin.$childpid
+set f [open $childsentinel w]
+close $f
+
+#
+# For the upgrade tests, where a current-release tclsh starts a tclsh from
+# an older release, we cannot tell the child to source the current test.tcl,
+# because it may use features that do not exist in the old release. So we
+# skip that step, and the script running in the old release has to take
+# care of itself.
+#
+if { $skip != "SKIP" } {
+ puts $t "source $test_path/test.tcl"
+}
+puts $t "set script $script"
+
+# Set up argv for the subprocess, since the args aren't passed in as true
+# arguments thanks to the pipe structure.
+puts $t "set argc [llength $args]"
+puts $t "set argv [list $args]"
+
+set has_path [file dirname $script]
+if { $has_path != "." } {
+ set scr $script
+} else {
+ set scr $test_path/$script
+}
+#puts "Script $script: path $has_path, scr $scr"
+puts $t "set scr $scr"
+puts $t {set ret [catch { source $scr } result]}
+puts $t {if { [string length $result] > 0 } { puts $result }}
+puts $t {error_check_good "$scr run: $result: pid [pid]" $ret 0}
+
+# Close the pipe. This will flush the above commands and actually run the
+# test, and will also return an error a la exec if anything bad happens
+# to the subprocess. The magic here is that closing a pipe blocks
+# and waits for the exit of processes in the pipeline, at least according
+# to Ousterhout (p. 115).
+
+set ret [catch {close $t} res]
+
+# Write ending sentinel files--we're done.
+set f [open $testdir/end.$childpid w]
+close $f
+set f [open $testdir/end.$parentpid w]
+close $f
+
+error_check_good "Pipe close ($childpid: $script $argv: logfile $logfile)"\
+ $ret 0
+exit $ret
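
The sentinel protocol that wrap.tcl implements can be sketched from the caller's side. The fragment below is illustrative only and not part of the patch: it reuses $tclsh_path, $test_path and $testdir from include.tcl, the script name myscript.tcl is a placeholder, and wait_for_end_sentinels is a hypothetical helper standing in for the suite's real watch_procs, whose interface is not reproduced here.

    # Launch a test script under wrap.tcl in the background.  wrap.tcl
    # writes $testdir/begin.<pid> for both the watcher and the child.
    set watcher [exec $tclsh_path $test_path/wrap.tcl \
        myscript.tcl $testdir/myscript.log &]

    # Hypothetical stand-in for watch_procs: a wrapped process is finished
    # once an end.<pid> sentinel appears beside its begin.<pid> sentinel.
    proc wait_for_end_sentinels { testdir } {
        while { 1 } {
            set done 1
            foreach b [glob -nocomplain $testdir/begin.*] {
                set pid [lindex [split [file tail $b] .] end]
                if { ![file exists $testdir/end.$pid] } {
                    set done 0
                }
            }
            if { $done } {
                break
            }
            after 1000
        }
    }

    wait_for_end_sentinels $testdir
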
diff --git a/db-4.8.30/test/wrap_reptest.tcl b/db-4.8.30/test/wrap_reptest.tcl
new file mode 100644
index 0000000..24abfa3
--- /dev/null
+++ b/db-4.8.30/test/wrap_reptest.tcl
@@ -0,0 +1,59 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2009 Oracle. All rights reserved.
+#
+# $Id$
+#
+# This is a very cut down version of wrap.tcl. We don't want to
+# use wrap.tcl because that will create yet another Tcl subprocess
+# to execute the test. We want to open the test program directly
+# here so that we get the pid for the program (not the Tcl shell)
+# and watch_procs can kill the program if needed.
+
+source ./include.tcl
+source $test_path/test.tcl
+
+# Arguments:
+if { $argc != 2 } {
+ puts "FAIL: wrap_reptest.tcl: Usage: wrap_reptest.tcl argfile log"
+ exit
+}
+
+set argfile [lindex $argv 0]
+set logfile [lindex $argv 1]
+
+# Create a sentinel file to mark our creation and signal that watch_procs
+# should look for us.
+set parentpid [pid]
+set parentsentinel $testdir/begin.$parentpid
+set f [open $parentsentinel w]
+close $f
+
+# Read the saved arguments and start db_reptest directly (not another Tcl
+# shell), so the pid we record is db_reptest's own.
+set argf [open $argfile r]
+set progargs [read $argf]
+close $argf
+set cmd [open "| $util_path/db_reptest $progargs >& $logfile" w]
+set childpid [pid $cmd]
+
+puts "Script watcher process $parentpid launching db_reptest process $childpid to $logfile."
+set childsentinel $testdir/begin.$childpid
+set f [open $childsentinel w]
+close $f
+
+# Close the pipe. This will flush the above commands and actually run the
+# test, and will also return an error a la exec if anything bad happens
+# to the subprocess. The magic here is that closing a pipe blocks
+# and waits for the exit of processes in the pipeline, at least according
+# to Ousterhout (p. 115).
+set ret [catch {close $cmd} res]
+
+# Write ending sentinel files--we're done.
+set f [open $testdir/end.$childpid w]
+close $f
+set f [open $testdir/end.$parentpid w]
+close $f
+
+error_check_good "($childpid: db_reptest $progargs: logfile $logfile)"\
+ $ret 0
+exit $ret
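
wrap_reptest.tcl is driven the same way, except that the db_reptest arguments are read from a file. The fragment below is again only an illustration, not part of the patch; the contents written to the argument file are a placeholder, since db_reptest's actual options are not described in this change.

    # Save the db_reptest command line in a file, then hand the file and a
    # log name to wrap_reptest.tcl, which execs db_reptest directly so the
    # recorded pid is db_reptest's own.
    set argfile $testdir/reptest.args
    set af [open $argfile w]
    puts $af "<db_reptest options go here>"
    close $af

    set watcher [exec $tclsh_path $test_path/wrap_reptest.tcl \
        $argfile $testdir/reptest.log &]

    # The begin.<pid>/end.<pid> sentinel files are then monitored exactly as
    # in the wrap.tcl sketch above.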