-- ------------------------------------------------------------------------------ --
-- TradeSkillMaster_AuctionDB --
-- http://www.curse.com/addons/wow/tradeskillmaster_auctiondb --
-- --
-- A TradeSkillMaster Addon (http://tradeskillmaster.com) --
-- All Rights Reserved* - Detailed license information included with addon. --
-- ------------------------------------------------------------------------------ --

-- Load the parent addon table (TSM) into a local variable and register this file as a module.
local TSM = select(2, ...)
local Scan = TSM:NewModule("Scan", "AceEvent-3.0")
local L = LibStub("AceLocale-3.0"):GetLocale("TradeSkillMaster_AuctionDB") -- loads the localization table

-- Module-level scan state.
Scan.groupScanStartTime = 0 -- time() value when the current "Group Scan" started
Scan.groupScanData = {} -- accumulated auction data (per itemString) during a group scan
Scan.filterList = {} -- remaining query filters for the current group scan
Scan.numFilters = 0 -- total number of filters in the current group scan
Scan.fullScanStartTime = 0 -- time() value when the current "Full Scan" started
Scan.fullScanSecondsPerPage = -1 -- unweighted "seconds per page" measured during the full scan
Scan.fullScanCompleteElapsed = nil -- total elapsed seconds of the last completed full scan

local verifyNewAlgorithm = false -- DEVELOPERS: Set to "true" to validate and benchmark the new market data algorithm!
-- Callback for "Full Scan" events fired by TSMAPI.AuctionScan:RunQuery().
-- Handles per-page progress updates (with a running time estimate), the
-- final scan data on completion, and interruption (AH window closed).
local function FullScanCallback(event, ...)
	if event == "SCAN_PAGE_UPDATE" then
		-- We're running a "Full Scan" and have received an auction page.
		-- NOTE: These normal per-page scans receive 50 items per page, and will
		-- successfully download ALL auctions on private servers, thanks to pagination.
		-- For example, a full scan retrieves all pages of 50 items each,
		-- meaning that it covers the entire auction list.
		local page, total = ...
		-- Calculate the current page progress and the remainder as floating-point values.
		local progress_float = page / total
		local remaining_float = 1.0 - progress_float
		-- Estimate the total scan time, based on a MIX of the average per-page so far,
		-- and the previous scan's averages stored in the database (if available).
		-- NOTE: This callback triggers after we RECEIVED "page", so we count "page" too.
		-- NOTE: We don't do any "live" updates of the progress bar text. We only
		-- update the text labels when we receive a page, which is very CPU-efficient.
		local time_estimate_str = ""
		if (page >= 1) and (total > page) then
			-- Calculate how many seconds have elapsed per page-request so far.
			-- NOTE: We time it via the less-precise "time()" function, which
			-- bluntly returns whole seconds. The alternative would be to use
			-- "debugprofilestop()", which has millisecond-precision, but breaks
			-- if another addon calls "debugprofilestart()" (which resets that
			-- timer back to zero). It doesn't really matter, since our "seconds
			-- per page" is constantly re-calculated based on the latest "total
			-- amount of whole seconds elapsed", so it doesn't accumulate any
			-- rounding errors and gets more precise the more pages have been
			-- downloaded (after ~10 pages, it's practically as accurate as the
			-- debug-timer). We have to use this technique for safety!
			-- NOTE: Most servers will gradually slow down the page requests
			-- across the first 300 requests or so, which means that the initial
			-- time estimate will grow until it settles on the correct time
			-- remainder. There's nothing we can do to predict those gradual
			-- slowdowns / throttling, which is why we're also storing the last
			-- scan's "final average" in the database and using that for our
			-- subsequent scan estimates.
			local seconds_elapsed = abs(time() - Scan.fullScanStartTime)
			local seconds_per_page = seconds_elapsed / page
			-- Remember our "real", unweighted value, for later DB storage.
			Scan.fullScanSecondsPerPage = seconds_per_page
			-- Calculate a smoothly weighted "seconds per page" value based on
			-- a linear mix between the current "seconds per page" and the
			-- stored "final seconds per page value" from our previous scan.
			-- As we reach 100%, we'll use 100% of the current "real seconds
			-- per page". But at 0%, we'll use the stored value instead.
			-- Between that, we linearly fade the values so that we react
			-- smoothly to changes in speed. This solves the issue that all
			-- servers face, which is their gradual slowdown of page fetches,
			-- where they start out very fast (such as 1.1 seconds per page),
			-- but will have slowed down when you're at the end (such as 2.5 per
			-- page). Typical server slowdown in speed is roughly linear, which
			-- is why our linear blend between "current estimate" and "finished
			-- estimate from previous scan" creates the most accurate results
			-- we're able to get, given the server behavior. It should also
			-- work perfectly on servers which don't follow this pattern, such
			-- as if they have a perfectly linear time between all pages without
			-- any throttling at all, in which case both estimates will basically
			-- agree anyway (both the current and the saved value).
			-- NOTE: This estimate cannot be improved, since practically all
			-- servers apply random throttling, have various loads and slowdowns
			-- throughout the day, etc. It would be like trying to predict "the
			-- total download-time of a file that keeps fluctuating between fast
			-- and slow speeds". The best we can do is estimate based on current
			-- and previous speeds.
			local last_scan_seconds_per_page = TSM.db.realm.lastScanSecondsPerPage
			if last_scan_seconds_per_page and last_scan_seconds_per_page > 0 then
				seconds_per_page = (seconds_per_page * progress_float) + (last_scan_seconds_per_page * remaining_float)
			end
			-- Estimate the "total time" requirement for ALL pages, rounded to
			-- the nearest whole second, at least 1 second.
			-- NOTE: We calculate the total estimate instead of the "remaining
			-- time", because servers tend to fluctuate constantly between slowly
			-- and then quickly sending the pages, which means a "pages_remaining"
			-- timer is hard to understand in terms of real time remaining, since
			-- we might get 10 pages within a few seconds and count down their
			-- "seconds per page" amounts much faster than natural time, and
			-- then suddenly stall for 30 seconds without getting any pages.
			-- So a "remaining time" estimate would not move naturally. Instead,
			-- we use a constantly updating "total time estimate" which follows
			-- the server performance beautifully and is easy to understand.
			-- NOTE: Thanks to the linear blend between historical and current
			-- server performance, our estimate is very accurate yet responsive.
			-- NOTE: The total page count is able to change during AH scan, when
			-- more auctions are added or removed, which further contributes to
			-- the confusion if we would use a "time remaining" display instead,
			-- but since we use a "total time" estimate the user instead smoothly
			-- sees the total estimate change when the page count changes.
			local seconds_total_estimate = max(1, floor((total * seconds_per_page) + 0.5))
			-- Convert the "elapsed / estimated" seconds into hours, minutes and seconds.
			time_estimate_str = format(" (%s / ~%s)", TSMAPI:FormatHMS(TSMAPI:SecondsToHMS(seconds_elapsed)), TSMAPI:FormatHMS(TSMAPI:SecondsToHMS(seconds_total_estimate)))
		end
		-- Calculate progress bar from 0-100%.
		local progress_bar = min(100 * progress_float, 100)
		-- Display the progress bar with the time estimate.
		TSM.GUI:UpdateStatus(format(L["Scanning page %s/%s"], page, total) .. time_estimate_str, progress_bar)
	elseif event == "SCAN_COMPLETE" then
		-- The whole scan is complete, and wasn't interrupted by the player.
		-- Store the final "seconds elapsed per page request" into the database.
		-- NOTE: We only update it here after complete scans, to avoid poisoning
		-- with incorrect, partial-scan estimates, since most servers heavily
		-- slow down their page requests over time. The completed scan is the truth.
		if Scan.fullScanSecondsPerPage > 0 then
			TSM.db.realm.lastScanSecondsPerPage = Scan.fullScanSecondsPerPage
		end
		-- Calculate how many seconds the completed "Full Scan" took.
		-- NOTE: We must cache it in this external variable, because "Full Scans"
		-- use a threading callback which calls "DoneScanning()" one more time,
		-- so we preserve the value to still display it via that callback too.
		Scan.fullScanCompleteElapsed = abs(time() - Scan.fullScanStartTime)
		-- Now process all of the fetched auctions, and display the total time elapsed.
		local data = ...
		Scan:ProcessScanData(data)
		Scan:DoneScanning(Scan.fullScanCompleteElapsed)
	elseif event == "SCAN_INTERRUPTED" or event == "INTERRUPTED" then
		-- We've been interrupted by the Auction House closing.
		-- NOTE: "SCAN_INTERRUPTED" is from LibAuctionScan-1.0, which isn't used
		-- by TSM anymore, and "INTERRUPTED" is from "TSM/Auction/AuctionScanning.lua",
		-- which is what this scanner uses nowadays.
		local data = ...
		Scan:ProcessScanData(data)
		Scan:DoneScanning()
	end
end
-- Callback for "Group Scan" events fired by TSMAPI:GenerateQueries() and
-- TSMAPI.AuctionScan:RunQuery(). Drives the per-filter scan queue.
local function GroupScanCallback(event, ...)
	if event == "QUERY_COMPLETE" then
		-- All query filters have been generated; begin scanning them one by one.
		-- NOTE: The old code also summed "#v.items" into a "numItems" local
		-- here, but that value was never used, so the dead loop was removed.
		local filterList = ...
		Scan.filterList = filterList
		Scan.numFilters = #filterList
		Scan:ScanNextGroupFilter()
	elseif event == "QUERY_UPDATE" then
		local current, total = ...
		TSM.GUI:UpdateStatus(format(L["Preparing Filter %d / %d"], current, total))
	elseif event == "SCAN_INTERRUPTED" or event == "INTERRUPTED" then
		-- We've been interrupted by the Auction House closing.
		-- NOTE: "SCAN_INTERRUPTED" is from LibAuctionScan-1.0, which isn't used
		-- by TSM anymore, and "INTERRUPTED" is from "TSM/Auction/AuctionScanning.lua",
		-- which is what this scanner uses nowadays.
		Scan:DoneScanning()
	elseif event == "SCAN_TIMEOUT" then
		-- The current filter timed out; drop it and continue with the next one.
		tremove(Scan.filterList, 1)
		Scan:ScanNextGroupFilter()
	elseif event == "SCAN_PAGE_UPDATE" then
		local page, total = ...
		-- We have now received at least 1 page for this item. Show how many pages remain.
		-- NOTE: We can't provide any time estimate here, since the other group sizes are unknown.
		-- NOTE: We use this particular item's page-progress as the progress bar.
		-- NOTE: We add "+1" to the page counter, to indicate that we've received that page and are working on the next page.
		local progress_bar = min(100*(page/total), 100) -- Calculate progress bar from 0-100%.
		-- NOTE(review): this passes nil as the 2nd argument and the progress as
		-- the 3rd, unlike the 2-argument calls elsewhere — presumably the GUI
		-- treats the 3rd arg as a secondary progress bar; verify against TSM.GUI.
		TSM.GUI:UpdateStatus(format(L["Scanning %d / %d (Page %d / %d)"], ((Scan.numFilters-#Scan.filterList) + 1), Scan.numFilters, min(page + 1, total), total), nil, progress_bar)
	elseif event == "SCAN_COMPLETE" then
		-- The current filter finished: merge its results into the group scan
		-- data (first result per itemString wins), then scan the next filter.
		local data = ...
		for _, itemString in ipairs(Scan.filterList[1].items) do
			if not Scan.groupScanData[itemString] then
				Scan.groupScanData[itemString] = data[itemString]
			end
		end
		tremove(Scan.filterList, 1)
		Scan:ScanNextGroupFilter()
	end
end
-- Scan the next filter in the group-scan queue, or — when no filters
-- remain — process the accumulated results and finish the scan.
-- NOTE: The "data" parameter is unused, but kept for interface compatibility.
function Scan:ScanNextGroupFilter(data)
	if #Scan.filterList == 0 then
		-- Calculate how many seconds the completed "Group Scan" took.
		local seconds_elapsed = abs(time() - Scan.groupScanStartTime)
		-- Now process all of the fetched auctions, and display the total time elapsed.
		Scan:ProcessScanData(Scan.groupScanData)
		Scan:DoneScanning(seconds_elapsed)
		return
	end
	-- Apply the temporary label for when we've requested the item's 1st page,
	-- but we don't yet know how many results or pages there are for this item.
	-- NOTE: We can't provide any time estimate here, since the other group sizes are unknown.
	-- NOTE: In the label, we count the items starting at 1, to say "Scanning 1 / 2"
	-- (instead of "Scanning 0 / 2"), but for the progress bar we count starting
	-- from 0, so that it fills up properly by only proceeding after an item is done.
	local progress_bar = min(100*((Scan.numFilters-#Scan.filterList)/Scan.numFilters), 100) -- Calculate progress bar from 0-100%.
	TSM.GUI:UpdateStatus(format(L["Scanning %d / %d (Page 1 / ?)"], ((Scan.numFilters-#Scan.filterList) + 1), Scan.numFilters), progress_bar)
	TSMAPI.AuctionScan:RunQuery(Scan.filterList[1], GroupScanCallback)
end
-- Start a "Group Scan" for the given TSM item-group items: reset all group
-- scan state, stop any running scan, and kick off query generation.
function Scan:StartGroupScan(items)
	Scan.isScanning = "Group"
	Scan.groupItems = items
	wipe(Scan.filterList)
	wipe(Scan.groupScanData)
	Scan.numFilters = 0
	TSMAPI.AuctionScan:StopScan() -- cancel any scan already in progress
	TSM.GUI:SetPauseEnabled(true)
	TSM.GUI:SetPaused(false)
	Scan.groupScanStartTime = time() -- Keep track of when we started the "Group Scan".
	TSMAPI:GenerateQueries(items, GroupScanCallback)
	TSM.GUI:UpdateStatus(L["Preparing Filters..."])
end
-- Start a "Full Scan" of the whole Auction House: reset the full-scan
-- timers, stop any running scan, and run an unfiltered (match-all) query.
function Scan:StartFullScan()
	Scan.isScanning = "Full"
	TSM.GUI:UpdateStatus(L["Running query..."])
	Scan.groupItems = nil
	TSMAPI.AuctionScan:StopScan() -- cancel any scan already in progress
	TSM.GUI:SetPauseEnabled(true)
	TSM.GUI:SetPaused(false)
	Scan.fullScanStartTime = time() -- Keep track of when we started the "Full Scan".
	Scan.fullScanSecondsPerPage = -1 -- Reset the page-speed timer.
	Scan.fullScanCompleteElapsed = nil -- Reset the "full scan completed" information.
	TSMAPI.AuctionScan:RunQuery({name=""}, FullScanCallback) -- empty name = match all auctions
end
-- Finish the current scan: show the "Done Scanning" status (optionally with
-- the total elapsed time) and clear the scanning/pause state.
function Scan:DoneScanning(seconds_elapsed)
	if seconds_elapsed then
		-- If given the "time elapsed", display it as "Done Scanning (1:35:27)".
		TSM.GUI:UpdateStatus(format("%s (%s)", L["Done Scanning"], TSMAPI:FormatHMS(TSMAPI:SecondsToHMS(seconds_elapsed))), 100)
	else
		-- Used when we don't care about showing time (such as scan failures).
		TSM.GUI:UpdateStatus(L["Done Scanning"], 100)
	end
	Scan.isScanning = nil
	TSM.GUI:SetPauseEnabled(false)
	TSM.GUI:SetPaused(false)
end
-- Pause the scan currently in progress. Does nothing when no scan is
-- running; only updates the GUI when the scanner actually paused.
function Scan:PauseScan()
	if not Scan.isScanning then
		return
	end
	local didPause = TSMAPI.AuctionScan:PauseScan("AuctionDB")
	if didPause then
		TSM.GUI:SetPaused(true)
		TSM.GUI:UpdateStatus(L["Scan Paused"])
	end
end
-- Resume a previously paused scan. If the underlying scanner has already
-- stopped, finish up instead of resuming.
function Scan:ResumeScan()
	if not Scan.isScanning then
		return
	end
	if not TSMAPI.AuctionScan:IsScanning() then
		Scan:DoneScanning()
		return
	end
	local didResume = TSMAPI.AuctionScan:ResumeScan()
	if didResume then
		TSM.GUI:SetPaused(false)
		TSM.GUI:UpdateStatus(L["Resuming Scan..."])
	end
end
-- Toggle the current scan between the paused and running states.
function Scan:TogglePause()
	if TSMAPI.AuctionScan:IsPaused() then
		Scan:ResumeScan()
	else
		Scan:PauseScan()
	end
end
-- Process the auction data collected by a "Full Scan" or "Group Scan":
-- condense each item's auctions into {stackCount, perItemBuyout} records,
-- track the minimum buyout and total quantity, and hand everything over to
-- TSM.Data:ProcessData(). Also broadcasts via ChannelSync when available.
function Scan:ProcessScanData(scanData)
	local data = {}
	for itemString, obj in pairs(scanData) do
		-- Only process itemStrings that equal their own base form (skips variants).
		if TSMAPI:GetBaseItemString(itemString) == itemString then
			local itemID = obj:GetItemID()
			local quantity, minBuyout = 0, 0
			local records = {}
			for _, record in ipairs(obj.records) do
				-- Only process this auction if we saw a valid buyout price (ignore bid-only auctions, etc).
				if record.buyout and record.buyout > 0 then
					-- Calculate the price per item, always rounded downwards.
					-- NOTE: "GetItemBuyout" returns nil if no buyout or if buyout is "0".
					local itemBuyout = record:GetItemBuyout()
					if itemBuyout then
						-- Track the lowest "per-item buyout price" we're seeing for this item.
						if (itemBuyout < minBuyout or minBuyout == 0) then
							minBuyout = itemBuyout
						end
						-- Count the total amount of this item that exists on the auction house (adds together all stacks).
						quantity = quantity + record.count
						-- Add 1 record per "stack" (auction), NOT 1 record per single item.
						-- (The old TSM code added one row per item in the stack, meaning
						-- 500 stacks of 1000 arrows would be half a million table rows
						-- and would lead to "out of memory" errors. Don't do that!)
						tinsert(records, {record.count, itemBuyout})
					end
				end
			end
			-- Add this item to "data to process" even if there are zero records,
			-- which can happen if they're all bid-only auctions. That's fine,
			-- since "ProcessData" simply ignores items without any buyout prices.
			-- NOTE: If no buyout records were found, "minBuyout" and "quantity"
			-- keep their initialized default value of "0".
			data[itemID] = {records=records, minBuyout=minBuyout, quantity=quantity}
		end
	end
	-- Mark the collected auction data as a new "complete scan" with today's date,
	-- but only if this was a normal "Full Scan" (not just a "TSM item group" scan).
	-- BUGFIX: "Scan.isScanning" is set to "Group" (capital G) by StartGroupScan,
	-- so the old lowercase comparison ("group") never matched and group scans
	-- incorrectly updated "lastCompleteScan" as well.
	if Scan.isScanning ~= "Group" then
		TSM.db.realm.lastCompleteScan = time()
	end
	-- Process the collected auction data.
	TSM.Data:ProcessData(data, Scan.groupItems, verifyNewAlgorithm)
	-- Broadcast the scan results via the channel-sync module, when it's loaded.
	if TSM.ChannelSync then
		TSM.ChannelSync:BroadcastScanData(Scan.isScanning, Scan.groupItems)
	end
end
-- Process manually imported auction scan data. Each entry in "auctionData"
-- maps an itemID to a list of {itemBuyout, count} pairs, which are condensed
-- the same way as in ProcessScanData and fed to TSM.Data:ProcessData().
-- NOTE(review): This function appears deprecated — nothing visible calls it.
-- Perhaps it's invoked dynamically, or is developer-only (basically a quick
-- way to emulate a full scan); verify before removing.
function Scan:ProcessImportedData(auctionData)
	local data = {}
	for itemID, auctions in pairs(auctionData) do
		-- Process all imported auction records for this item.
		local quantity, minBuyout = 0, 0
		local records = {}
		for _, auction in ipairs(auctions) do
			-- Fetch the "price per item" and "item-count in this stack" from the auction's data.
			-- NOTE: We only import auctions with per-item buyout values (ignore bid-only auctions, etc).
			local itemBuyout, count = unpack(auction)
			if itemBuyout then
				-- Track the lowest "per-item buyout price" we're seeing for this item.
				if (itemBuyout < minBuyout or minBuyout == 0) then
					minBuyout = itemBuyout
				end
				-- Count the total amount of this item that exists on the auction house (adds together all stacks).
				quantity = quantity + count
				-- Add 1 record per "stack" (auction), NOT 1 record per single item.
				-- (The old TSM code added one row per item in the stack, meaning
				-- 500 stacks of 1000 arrows would be half a million table rows
				-- and would lead to "out of memory" errors. Don't do that!)
				tinsert(records, {count, itemBuyout})
			end
		end
		-- Add this item to "data to process" even if there are zero records,
		-- which can happen if they're all bid-only auctions.
		data[itemID] = {records=records, minBuyout=minBuyout, quantity=quantity}
	end
	-- Process the imported auction data as a new "complete scan" with today's date.
	TSM.db.realm.lastCompleteScan = time()
	TSM.Data:ProcessData(data, nil, verifyNewAlgorithm)
end