Processor Usage History

Last Post 09 Jun 2014 04:50 AM by Olu. 9 Replies.
Olu
New Member
09 Jun 2014 04:22 AM
-- Get CPU Utilization History (SQL 2008 Only)
DECLARE @ts_now bigint = (SELECT cpu_ticks/(cpu_ticks/ms_ticks) FROM sys.dm_os_sys_info);

SELECT TOP(10) SQLProcessUtilization AS [SQL Server Process CPU Utilization],
SystemIdle AS [System Idle Process],
100 - SystemIdle - SQLProcessUtilization AS [Other Process CPU Utilization],
DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) AS [Event Time]
FROM (
SELECT record.value('(./Record/@id)[1]', 'int') AS record_id,
record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int')
AS [SystemIdle],
record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]',
'int')
AS [SQLProcessUtilization], [timestamp]
FROM (
SELECT [timestamp], convert(xml, record) AS [record]
FROM sys.dm_os_ring_buffers
WHERE ring_buffer_type = N'RING_BUFFER_SCHEDULER_MONITOR'
AND record LIKE N'%<SystemHealth>%') AS x
) AS y
ORDER BY record_id DESC;

/*use this to review relative processor performance*/

http://www.spec.org/cpu2006/results/res2012q2/cpu2006-20120423-21276.pdf

http://h20195.www2.hp.com/V2/GetPDF...426ENW.pdf

http://www.spec.org/cpu2006/results/res2012q2/

http://www.spec.org/cpu2006/results/res2014q1/
Olu
New Member
09 Jun 2014 04:27 AM
/*use view to determine the current disk io profile*/

CREATE VIEW [dbo].[vw_disk_io]

AS

SELECT
getdate() msg_date,
vfs.sample_ms,
vfs.[database_id],
mf.[name],
io_stall_write_ms,
io_stall_read_ms,
io_stall,
num_of_reads,
num_of_writes,
num_of_bytes_written,
num_of_bytes_read,
physical_name
FROM sys.dm_io_virtual_file_stats (NULL,NULL) AS [vfs]
JOIN sys.master_files AS [mf]
ON [vfs].[database_id] = [mf].[database_id]
AND [vfs].[file_id] = [mf].[file_id]
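
A quick way to turn the view into per-file latency numbers; this is only a sketch, assuming the view above has been created (the divides are guarded against zero counts):

SELECT [name],
       physical_name,
       CASE WHEN num_of_reads = 0 THEN 0 ELSE io_stall_read_ms / num_of_reads END AS avg_read_stall_ms,
       CASE WHEN num_of_writes = 0 THEN 0 ELSE io_stall_write_ms / num_of_writes END AS avg_write_stall_ms,
       num_of_reads,
       num_of_writes
FROM dbo.vw_disk_io
ORDER BY avg_read_stall_ms DESC;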

Olu
New Member
09 Jun 2014 04:30 AM
/* Quick query to review the current status of sessions; used to snapshot the current state at various intervals */



select
getdate() msg_date,
r.session_id spid,
r.blocking_session_id as blocker,
r.command cmd,
SUBSTRING(st.text, (sp.stmt_start/2)+1,
((CASE sp.stmt_end
WHEN -1 THEN DATALENGTH(st.text)
WHEN 0 THEN DATALENGTH(st.text)
ELSE sp.stmt_end
END - sp.stmt_start)/2) + 1) AS active_sql,
s.status ,
r.open_transaction_count open_tran,
r.last_wait_type,
r.wait_type,
r.wait_time,
r.wait_resource,
s.[host_name],
s.login_name,
db_name(r.database_id) database_name,
db_name(dt.database_id) xact_database_name,
object_name(st.[objectid]) obj,
r.cpu_time,
s.memory_usage,
round(cast(mg.requested_memory_kb/1024 as decimal),1) requested_memory_mb,
round(cast(mg.granted_memory_kb/1024 as decimal),1) granted_memory_mb,
round(cast(mg.required_memory_kb/1024 as decimal),1) required_memory_mb,
round(cast(mg.used_memory_kb/1024 as decimal),1) used_memory_mb,
round(cast(mg.query_cost as decimal),1) query_cost,
mg.dop degree_of_parallelism,
round(cast(su.tempdb_internal_objects_alloc_Mb as decimal),1) tempdb_internal_alloc_Mb,
round(cast(su.tempdb_user_objects_alloc_Mb as decimal),1) tempdb_user_alloc_Mb,
round(cast(su.tempdb_internal_objects_dealloc_Mb as decimal),1) tempdb_internal_dealloc_Mb,
round(cast(su.tempdb_user_objects_dealloc_Mb as decimal),1) tempdb_user_dealloc_Mb,
sp.physical_io,
s.host_process_id,
dt.database_transaction_begin_time,
s.last_request_start_time as last_batch_started,
ISNULL(r.granted_query_memory, 0 )granted_query_memory ,
s.login_time,
s.last_request_start_time,
s.logical_reads,
ISNULL(r.percent_complete,0) percent_complete,
s.reads,s.writes,s.row_count,
ISNULL(dt.[database_transaction_log_bytes_used], 0) AS [Log Bytes Used],
ISNULL(dt.[database_transaction_log_bytes_reserved], 0) AS [Log Bytes Rsvd],
ISNULL(r.scheduler_id, -1) as scheduler_id, -- (-1 is inactive)
s.[program_name],
r.plan_handle,
r.sql_handle [sql_handle],
r.statement_start_offset,
r.statement_end_offset
from sys.dm_exec_sessions s (nolock)

left join sys.dm_exec_requests r (nolock) on s.session_id = r.session_id
CROSS APPLY sys.dm_exec_sql_text(r.sql_handle) AS st
left join sys.dm_tran_session_transactions t (nolock) on t.session_id = s.session_id
left join sys.dm_tran_database_transactions dt (nolock) on dt.transaction_id = t.transaction_id
left join master.dbo.sysprocesses sp (nolock) on s.session_id = sp.spid
left join sys.dm_exec_query_memory_grants mg (nolock) on mg.session_id = sp.spid
left join (SELECT session_id,
(SUM(internal_objects_alloc_page_count)*1.0/128) AS tempdb_internal_objects_alloc_Mb,
(SUM(user_objects_alloc_page_count)*1.0/128) AS tempdb_user_objects_alloc_Mb,
(SUM(internal_objects_dealloc_page_count)*1.0/128)AS tempdb_internal_objects_dealloc_Mb,
(SUM(user_objects_dealloc_page_count)*1.0/128) AS tempdb_user_objects_dealloc_Mb
FROM sys.dm_db_session_space_usage ssu (nolock)
GROUP BY session_id) su
on su.session_id = sp.spid
where r.session_id <> @@spid
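
To keep these snapshots over time rather than just looking at one run, a cut-down version of the same idea can be persisted on a schedule. This is only a sketch with a reduced column list; dbo.session_snapshots is a hypothetical table name and the sampling interval is up to you (e.g. a SQL Agent job):

-- Create the history table once (hypothetical name)
IF OBJECT_ID('dbo.session_snapshots') IS NULL
    CREATE TABLE dbo.session_snapshots
    (
        msg_date      datetime      NOT NULL,
        spid          int           NOT NULL,
        blocker       int           NULL,
        [status]      nvarchar(60)  NULL,
        wait_type     nvarchar(120) NULL,
        wait_time     int           NULL,
        cpu_time      int           NULL,
        logical_reads bigint        NULL
    );

-- Run on each sampling interval
INSERT INTO dbo.session_snapshots (msg_date, spid, blocker, [status], wait_type, wait_time, cpu_time, logical_reads)
SELECT GETDATE(), s.session_id, r.blocking_session_id, s.[status],
       r.wait_type, r.wait_time, r.cpu_time, s.logical_reads
FROM sys.dm_exec_sessions s
LEFT JOIN sys.dm_exec_requests r ON r.session_id = s.session_id
WHERE s.is_user_process = 1
  AND s.session_id <> @@SPID;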
Olu
New Member
09 Jun 2014 04:33 AM
/* Quick guide to table compression (page/row) and partitioning */

declare @largest_index_mb int
set @largest_index_mb = 20000
SELECT
DISTINCT
c.Table_Name,
COUNT(c.Index_Name) [partitions],
c.Index_Name,
Index_Type,
SUM(ROW_COUNT) ROW_COUNT,
SUM([RsvdPage mb])[RsvdPage mb],
SUM(total_activity)total_activity,
CAST(AVG(Percent_Insert) AS DECIMAL(10,2)) Percent_Insert,
CAST(AVG(Percent_Update)AS DECIMAL(10,2)) Percent_Update,
CAST(AVG(Percent_Delete)AS DECIMAL(10,2)) Percent_Delete,
CAST(AVG(Percent_singleton_lookup_count)AS DECIMAL(10,2)) Percent_singleton_lookup_count,
CAST(AVG(Percent_Scan)AS DECIMAL(10,2)) Percent_Scan,
CAST(AVG(Percent_Merge)AS DECIMAL(10,2)) Percent_Merge,
data_compression_desc [current_compression],
CASE
WHEN CAST(AVG(Percent_Scan)AS DECIMAL(10,2)) > 60 THEN 'PAGE'
WHEN CAST(AVG(Percent_Update)AS DECIMAL(10,2)) > 60 THEN 'ROW'
ELSE 'More analysis reqd'
END suggested_compression,
CASE
WHEN CAST(AVG(Percent_Scan)AS DECIMAL(10,2)) > 60 THEN 'YES'
WHEN CAST(AVG(Percent_singleton_lookup_count)AS DECIMAL(10,2)) > 60 OR SUM([RsvdPage mb]) < @largest_index_mb THEN 'NO'
ELSE 'More analysis reqd'
END [Do I Partition?]
FROM
(
SELECT distinct o.name AS [Table_Name], x.name AS [Index_Name],
i.partition_number AS [Partition],
i.index_id AS [Index_ID], x.type_desc AS [Index_Type],
ps.[row_count],
[RsvdPage mb] = CAST(CONVERT(DECIMAL(19,2),ps.reserved_page_count) * 8/1024 AS INT) ,
[UsedPage mb] = CAST(CONVERT(DECIMAL(19,2),ps.used_page_count) *8/1024 AS INT),
(i.range_scan_count + i.leaf_insert_count
+ i.leaf_delete_count + i.leaf_update_count
+ i.leaf_page_merge_count + i.singleton_lookup_count
) total_activity,
cast(i.leaf_insert_count * 100.0 /
(i.range_scan_count + i.leaf_insert_count
+ i.leaf_delete_count + i.leaf_update_count
+ i.leaf_page_merge_count + i.singleton_lookup_count
) as decimal(10,3)) AS [Percent_Insert],
cast(i.leaf_update_count * 100.0 /
(i.range_scan_count + i.leaf_insert_count
+ i.leaf_delete_count + i.leaf_update_count
+ i.leaf_page_merge_count + i.singleton_lookup_count
) as decimal(10,3)) AS [Percent_Update],
cast (i.leaf_delete_count * 100.0/
(i.range_scan_count + i.leaf_insert_count
+ i.leaf_delete_count + i.leaf_update_count
+ i.leaf_page_merge_count + i.singleton_lookup_count
) as decimal(10,3)) AS [Percent_Delete],
cast(i.singleton_lookup_count * 100.0/
(i.range_scan_count + i.leaf_insert_count
+ i.leaf_delete_count + i.leaf_update_count
+ i.leaf_page_merge_count + i.singleton_lookup_count
) as decimal(10,3)) AS [Percent_singleton_lookup_count],
cast (i.range_scan_count * 100.0 /
(i.range_scan_count + i.leaf_insert_count
+ i.leaf_delete_count + i.leaf_update_count
+ i.leaf_page_merge_count + i.singleton_lookup_count
) as decimal(10,3)) AS [Percent_Scan],
cast(i.leaf_page_merge_count * 100.0 /
(i.range_scan_count + i.leaf_insert_count
+ i.leaf_delete_count + i.leaf_update_count
+ i.leaf_page_merge_count + i.singleton_lookup_count
) as decimal(10,3)) AS [Percent_Merge], p.data_compression_desc
FROM sys.dm_db_index_operational_stats (db_id(), NULL, NULL, NULL) i
JOIN sys.objects o ON o.object_id = i.object_id
JOIN sys.indexes x ON x.object_id = i.object_id AND x.index_id = i.index_id
JOIN sys.dm_db_partition_stats ps ON ps.object_id = x.object_id AND ps.index_id = x.index_id
AND ps.partition_number = i.partition_number
JOIN sys.partitions p ON ps.object_id = p.object_id AND ps.index_id = p.index_id
--AND ps.partition_number = p.partition_number
WHERE (i.range_scan_count + i.leaf_insert_count
+ i.leaf_delete_count + leaf_update_count
+ i.leaf_page_merge_count + i.singleton_lookup_count) != 0
AND objectproperty(i.object_id,'IsUserTable') = 1 AND row_count > 0
) AS c
GROUP BY
c.Table_Name,
c.Index_Name,
Index_Type,
data_compression_desc
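
Once the query above suggests PAGE or ROW for an index, the change can be estimated and then applied with a rebuild. This is only a sketch; the table and index names are placeholders, and ONLINE = ON assumes an edition that supports online rebuilds:

-- Estimate the space saving first (placeholder object names)
EXEC sp_estimate_data_compression_savings
     @schema_name      = 'dbo',
     @object_name      = 'YourLargeTable',
     @index_id         = NULL,
     @partition_number = NULL,
     @data_compression = 'PAGE';

-- Then apply it with a rebuild (placeholder index/table names)
ALTER INDEX [IX_YourLargeTable_SomeIndex] ON dbo.YourLargeTable
    REBUILD WITH (DATA_COMPRESSION = PAGE, ONLINE = ON);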
Olu
New Member
09 Jun 2014 04:37 AM
/*good article on resource semaphore waits*/
http://blogs.msdn.com/b/sqlquerypro...grant.aspx
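
Related to that article, a quick look at current memory-grant pressure (the usual driver of RESOURCE_SEMAPHORE waits) can be taken straight from the DMV; a minimal sketch:

SELECT resource_semaphore_id,
       target_memory_kb / 1024    AS target_memory_mb,
       available_memory_kb / 1024 AS available_memory_mb,
       granted_memory_kb / 1024   AS granted_memory_mb,
       grantee_count,
       waiter_count,
       timeout_error_count,
       forced_grant_count
FROM sys.dm_exec_query_resource_semaphores;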

/* Paul Randal's wait stats script, showing wait time % and wait count % */
WITH [Waits] AS
(SELECT
[wait_type],
[wait_time_ms] / 1000.0 AS [WaitS],
([wait_time_ms] - [signal_wait_time_ms]) / 1000.0 AS [ResourceS],
[signal_wait_time_ms] / 1000.0 AS [SignalS],
[waiting_tasks_count] AS [WaitCount],
100.0 * [wait_time_ms] / SUM ([wait_time_ms]) OVER() AS [wait_time %],
100.0 * [waiting_tasks_count] / SUM ([waiting_tasks_count]) OVER() AS [wait_count %],
ROW_NUMBER() OVER(ORDER BY [wait_time_ms] DESC) AS [RowNum]
FROM sys.dm_os_wait_stats
WHERE [wait_type] NOT IN (
N'CLR_SEMAPHORE', N'LAZYWRITER_SLEEP',
N'RESOURCE_QUEUE', N'SQLTRACE_BUFFER_FLUSH',
N'SLEEP_TASK', N'SLEEP_SYSTEMTASK',
N'WAITFOR', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION',
N'CHECKPOINT_QUEUE', N'REQUEST_FOR_DEADLOCK_SEARCH',
N'XE_TIMER_EVENT', N'XE_DISPATCHER_JOIN',
N'LOGMGR_QUEUE', N'FT_IFTS_SCHEDULER_IDLE_WAIT',
N'BROKER_TASK_STOP', N'CLR_MANUAL_EVENT',
N'CLR_AUTO_EVENT', N'DISPATCHER_QUEUE_SEMAPHORE',
N'TRACEWRITE', N'XE_DISPATCHER_WAIT',
N'BROKER_TO_FLUSH', N'BROKER_EVENTHANDLER',
N'FT_IFTSHC_MUTEX', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP',
N'DIRTY_PAGE_POLL', N'SP_SERVER_DIAGNOSTICS_SLEEP', N'ONDEMAND_TASK_QUEUE')
)
SELECT
[W1].[wait_type] AS [WaitType],
CAST ([W1].[WaitS] AS DECIMAL(14, 2)) AS [Wait_S],
CAST ([W1].[ResourceS] AS DECIMAL(14, 2)) AS [Resource_S],
CAST ([W1].[SignalS] AS DECIMAL(14, 2)) AS [Signal_S],
[W1].[WaitCount] AS [WaitCount],
CAST ([W1].[wait_time %] AS DECIMAL(4, 2)) AS [wait_time %],
CAST ([W1].[wait_count %] AS DECIMAL(4, 2)) AS [wait_count %],
CAST (([W1].[WaitS] / [W1].[WaitCount]) AS DECIMAL (14, 4)) AS [AvgWait_S],
CAST (([W1].[ResourceS] / [W1].[WaitCount]) AS DECIMAL (14, 4)) AS [AvgRes_S],
CAST (([W1].[SignalS] / [W1].[WaitCount]) AS DECIMAL (14, 4)) AS [AvgSig_S]
FROM [Waits] AS [W1]
INNER JOIN [Waits] AS [W2]
ON [W2].[RowNum] <= [W1].[RowNum]
GROUP BY [W1].[RowNum], [W1].[wait_type], [W1].[WaitS],
[W1].[ResourceS], [W1].[SignalS], [W1].[WaitCount], [W1].[wait_time %], [W1].[wait_count %]
HAVING SUM ([W2].[wait_time %] ) - [W1].[wait_time %] < 99; -- percentage threshold



/*==================================================*/
/* Determines when the wait stats were last cleared */
/*==================================================*/

SELECT
[wait_type],
[wait_time_ms],
--DATEADD(ms,-[wait_time_ms],getdate()) AS [Date/TimeCleared],
DATEADD(SS,-wait_time_ms/1000,getdate()) AS [Date/TimeCleared],
CASE
WHEN [wait_time_ms] < 1000 THEN CAST([wait_time_ms] AS VARCHAR(15)) + ' ms'
WHEN [wait_time_ms] between 1000 and 60000 THEN CAST(([wait_time_ms]/1000) AS VARCHAR(15)) + ' seconds'
WHEN [wait_time_ms] between 60001 and 3600000 THEN CAST(([wait_time_ms]/60000) AS VARCHAR(15)) + ' minutes'
WHEN [wait_time_ms] between 3600001 and 86400000 THEN CAST(([wait_time_ms]/3600000) AS VARCHAR(15)) + ' hours'
WHEN [wait_time_ms] > 86400000 THEN CAST(([wait_time_ms]/86400000) AS VARCHAR(15)) + ' days'
END [TimeSinceCleared]
FROM [sys].[dm_os_wait_stats]
WHERE [wait_type] in ('SQLTRACE_INCREMENTAL_FLUSH_SLEEP', 'SQLTRACE_BUFFER_FLUSH')
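
If the counters have been polluted by an older workload, they can be reset manually before sampling again (note this clears the cumulative wait statistics for the whole instance):

DBCC SQLPERF (N'sys.dm_os_wait_stats', CLEAR);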

/*good info on latch classes and their meaning*/
http://www.sqlskills.com/blogs/paul...they-mean/
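
For the latch side of the same picture, sys.dm_os_latch_stats exposes the cumulative numbers per latch class; a minimal sketch (BUFFER latches are excluded here since they are usually better viewed through PAGELATCH_* waits):

SELECT TOP (20)
       latch_class,
       waiting_requests_count,
       wait_time_ms,
       max_wait_time_ms
FROM sys.dm_os_latch_stats
WHERE latch_class <> N'BUFFER'
ORDER BY wait_time_ms DESC;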
Olu
New Member
09 Jun 2014 04:39 AM
/*Spinlocks and spinlock backoffs*/
/*
This script is provided "AS IS" with no warranties, and confers no rights.

Use: This procedure monitors for spinlocks with a high number of backoff events
over a defined time period, which would indicate that significant spinlock
contention is likely.

Modify the variables noted below before running.


Requires:
xp_cmdshell to be enabled:
sp_configure 'show advanced options', 1
reconfigure
sp_configure 'xp_cmdshell', 1
go
reconfigure
go

*********************************************************************************************************/
use tempdb
go
if object_id('sp_xevent_dump_on_backoffs') is not null
drop proc sp_xevent_dump_on_backoffs
go
create proc sp_xevent_dump_on_backoffs
(
@sqldumper_path nvarchar(max) = '"c:\Program Files\Microsoft SQL Server\100\Shared\SqlDumper.exe"'
,@dump_threshold int = 500 --capture mini dump when the slot count for the top bucket exceeds this
,@total_delay_time_seconds int = 60 --poll for 60 seconds
,@PID int = 0
,@output_path nvarchar(max) = 'c:\'
,@dump_captured_flag int = 0 OUTPUT

)
as
/*
--Find the spinlock types
select map_value, map_key, name from sys.dm_xe_map_values
where name = 'spinlock_types'
order by map_value asc

--Example: Get the type value for any given spinlock type
select map_value, map_key, name from sys.dm_xe_map_values
where map_value IN ('SOS_CACHESTORE', 'LOCK_HASH', 'MUTEX')
*/
if exists (select * from sys.dm_xe_session_targets xst
inner join sys.dm_xe_sessions xs on (xst.event_session_address = xs.address)
where xs.name = 'spinlock_backoff_with_dump')
drop event session spinlock_backoff_with_dump on server

create event session spinlock_backoff_with_dump on server
add event sqlos.spinlock_backoff (action (package0.callstack)
where
type = 61 --LOCK_HASH
--or type = 144 --SOS_CACHESTORE
--or type = 8 --MUTEX
--or type = 53 --LOGCACHE_ACCESS
--or type = 41 --LOGFLUSHQ
--or type = 25 --SQL_MGR
--or type = 39 --XDESMGR
)
add target package0.asynchronous_bucketizer (
set filtering_event_name='sqlos.spinlock_backoff',
source_type=1, source='package0.callstack')
with (MAX_MEMORY=50MB, MEMORY_PARTITION_MODE = PER_NODE)

alter event session spinlock_backoff_with_dump on server state=start


declare @instance_name nvarchar(max) = @@SERVICENAME
declare @loop_count int = 1
declare @xml_result xml
declare @slot_count bigint
declare @xp_cmdshell nvarchar(max) = null

--start polling for the backoffs
print 'Polling for: ' + convert(varchar(32), @total_delay_time_seconds) + ' seconds'
while (@loop_count < CAST (@total_delay_time_seconds/1 as int))
begin
waitfor delay '00:00:01'

--get the xml from the bucketizer for the session
select @xml_result= CAST(target_data as xml)
from sys.dm_xe_session_targets xst
inner join sys.dm_xe_sessions xs on (xst.event_session_address = xs.address)
where xs.name = 'spinlock_backoff_with_dump'

--get the highest slot count from the bucketizer
select @slot_count = @xml_result.value(N'(//Slot/@count)[1]', 'int')

--if the slot count is higher than the threshold in the one minute period
--dump the process and clean up session
if (@slot_count > @dump_threshold)
begin
print 'exec xp_cmdshell ''' + @sqldumper_path + ' ' + convert(nvarchar(max), @PID) + ' 0 0x800 0 c:\ '''
select @xp_cmdshell = 'exec xp_cmdshell ''' + @sqldumper_path + ' ' + convert(nvarchar(max), @PID) + ' 0 0x800 0 ' + @output_path + ' '''
exec sp_executesql @xp_cmdshell
print 'loop count: ' + convert (varchar(128), @loop_count)
print 'slot count: ' + convert (varchar(128), @slot_count)
set @dump_captured_flag = 1
break
end

--otherwise loop
set @loop_count = @loop_count + 1

end

--see what was collected then clean up
DBCC traceon (3656, -1)
select event_session_address, target_name, execution_count, cast (target_data as XML)
from sys.dm_xe_session_targets xst
inner join sys.dm_xe_sessions xs on (xst.event_session_address = xs.address)
where xs.name = 'spinlock_backoff_with_dump'

alter event session spinlock_backoff_with_dump on server state=stop
drop event session spinlock_backoff_with_dump on server
go

/* CAPTURE THE DUMPS
******************************************************************/
--Example: This will run continuously until a dump is created.
declare @sqldumper_path nvarchar(max) = '"c:\Program Files\Microsoft SQL Server\100\Shared\SqlDumper.exe"'
declare @dump_threshold int = 300 --capture mini dump when the slot count for the top bucket exceeds this
declare @total_delay_time_seconds int = 60 --poll for 60 seconds
declare @PID int = 0
declare @flag tinyint = 0
declare @dump_count tinyint = 0
declare @max_dumps tinyint = 3 --stop after collecting this many dumps
declare @output_path nvarchar(max) = 'c:\' --no spaces in the path please :)


--Get the process id for sql server
declare @error_log table (LogDate datetime,
ProcessInfo varchar(255),
Text varchar(max)
)
insert into @error_log
exec ('xp_readerrorlog 0, 1, ''Server Process ID''')
select @PID = convert(int, (REPLACE(REPLACE(Text, 'Server Process ID is ', ''), '.', '')))
from @error_log where Text like ('Server Process ID is%')
print 'SQL Server PID: ' + convert (varchar(6), @PID)

--Loop to monitor the spinlocks and capture dumps.
while (@dump_count < @max_dumps)
begin

exec sp_xevent_dump_on_backoffs @sqldumper_path = @sqldumper_path,
@dump_threshold = @dump_threshold,
@total_delay_time_seconds = @total_delay_time_seconds,
@PID = @PID,
@output_path = @output_path,
@dump_captured_flag = @flag OUTPUT
if (@flag > 0)
set @dump_count=@dump_count + 1
print 'Dump Count: ' + convert(varchar(2), @dump_count)
waitfor delay '00:00:02'

end
Appendix B: Capturing Spinlocks Statistics Over a Specific Time Period
The following script can be used to look at spinlock statistics over a specific time period. Each time it runs it will return the delta between the current values and previous values collected.
/* Snapshot the current spinlock stats and store so that this can be compared over a time period
Return the statistics between this point in time and the last collection point in time.

**This data is maintained in tempdb so the connection must persist between each execution**
**alternatively this could be modified to use a persisted table in tempdb. if that
is changed code should be included to clean up the table at some point.**
*/

use tempdb
go

declare @current_snap_time datetime
declare @previous_snap_time datetime

set @current_snap_time = GETDATE()

if not exists(select name from tempdb.sys.sysobjects where name like '#_spin_waits%')
create table #_spin_waits
(
lock_name varchar(128)
,collisions bigint
,spins bigint
,sleep_time bigint
,backoffs bigint
,snap_time datetime
)

--capture the current stats
insert into #_spin_waits
(
lock_name
,collisions
,spins
,sleep_time
,backoffs
,snap_time
)
select name
,collisions
,spins
,sleep_time
,backoffs
,@current_snap_time
from sys.dm_os_spinlock_stats

select top 1 @previous_snap_time = snap_time from #_spin_waits
where snap_time < (select max(snap_time) from #_spin_waits)
order by snap_time desc

--get delta in the spin locks stats
select top 10
spins_current.lock_name
, (spins_current.collisions - spins_previous.collisions) as collisions
, (spins_current.spins - spins_previous.spins) as spins
, (spins_current.sleep_time - spins_previous.sleep_time) as sleep_time
, (spins_current.backoffs - spins_previous.backoffs) as backoffs
, spins_previous.snap_time as [start_time]
, spins_current.snap_time as [end_time]
, DATEDIFF(ss, @previous_snap_time, @current_snap_time) as [seconds_in_sample]
from #_spin_waits spins_current
inner join (
select * from #_spin_waits
where snap_time = @previous_snap_time
) spins_previous on (spins_previous.lock_name = spins_current.lock_name)
where
spins_current.snap_time = @current_snap_time
and spins_previous.snap_time = @previous_snap_time
and spins_current.spins > 0
order by (spins_current.spins - spins_previous.spins) desc

--clean up table
delete from #_spin_waits
where snap_time = @previous_snap_time
Olu
New Member
09 Jun 2014 04:41 AM
/* CPU/IO weight DBCC commands used to inflate several resource costs */
/* Notable use: to force an increased DOP */
DBCC TRACEON (3604); -- Show DBCC output
DBCC SETCPUWEIGHT(1E0); -- Default CPU weight
DBCC SETIOWEIGHT(0.6E0); -- I/O multiplier = 0.6
DBCC SHOWWEIGHTS; -- Show the settings
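
After testing, it is worth putting the weights back so later plan costing is not skewed. A minimal sketch, assuming 1E0 is the default multiplier for both weights (as the comment above indicates for CPU):

DBCC SETCPUWEIGHT(1E0); -- restore default CPU cost weight
DBCC SETIOWEIGHT(1E0);  -- restore default I/O cost weight
DBCC SHOWWEIGHTS;       -- confirm the settings
DBCC TRACEOFF(3604);    -- stop routing DBCC output to the client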

Olu
New Member
09 Jun 2014 04:44 AM
/*Useful for quick historical search of resource info/bottleneck info*/


WITH cRingBufferOOM
AS (
SELECT ring_buffer_type,CAST (record as xml) record_xml, [timestamp] FROM sys.dm_os_ring_buffers r
--WHERE ring_buffer_type = 'RING_BUFFER_OOM'
WHERE ring_buffer_type ='RING_BUFFER_RESOURCE_MONITOR'
)
SELECT
--ring_buffer_type
--,rx.value('(@id)[1]', 'bigint') AS RecordID
--,
--DATEADD (ms, -1 * osi.ms_ticks - rx.value('(@timestamp)[1]', 'bigint'), GETDATE()) AS DateOccurred
dateadd (ms, rbo.[timestamp] - osi.ms_ticks, getdate()) as record_time
,rx.value('(OOM/Action)[1]', 'varchar(30)') AS MemoryAction
,rx.value('(OOM/Pool)[1]', 'int') AS MemoryPool
,rx.value('(MemoryNode/ReservedMemory)[1]', 'bigint')/1024 AS ReservedMemoryMB
,rx.value('(MemoryNode/SharedMemory)[1]', 'bigint')/1024 AS SharedMemoryMB
,rx.value('(MemoryNode/AWEMemory)[1]', 'bigint')/1024 AS AWEMemoryMB
,rx.value('(MemoryNode/SinglePagesMemory)[1]', 'bigint')/1024 AS SinglePagesMemoryMB
,rx.value('(MemoryNode/MultiplePagesMemory)[1]', 'bigint')/1024 AS MultiplePagesMemoryMB
,rx.value('(MemoryNode/@id)[1]', 'bigint') AS NodeID
,rx.value('(MemoryNode/ReservedMemory)[1]', 'bigint')/1024 AS SQL_ReservedMemoryMB
,rx.value('(MemoryNode/CommittedMemory)[1]', 'bigint')/1024 AS SQL_CommittedMemoryMB
,rx.value('(MemoryRecord/MemoryUtilization)[1]', 'bigint') AS MemoryUtilization
,rx.value('(MemoryRecord/TotalPhysicalMemory)[1]', 'bigint')/1024 AS TotalPhysicalMemoryMB
,rx.value('(MemoryRecord/AvailablePhysicalMemory)[1]', 'bigint')/1024 AS AvailablePhysicalMemoryMB
,rx.value('(MemoryRecord/TotalPageFile)[1]', 'bigint')/1024 AS TotalPageFileMB
,rx.value('(MemoryRecord/AvailablePageFile)[1]', 'bigint')/1024 AS AvailablePageFileMB
,rx.value('(MemoryRecord/TotalVirtualAddressSpace)[1]', 'bigint')/1024 AS TotalVASMB
,rx.value('(MemoryRecord/AvailableExtendedVirtualAddressSpace)[1]', 'bigint')/1024 AS AvailableExtendedVASMB
FROM cRingBufferOOM rbo
CROSS APPLY rbo.record_xml.nodes('Record') record(rx)
CROSS JOIN sys.dm_os_sys_info osi
order by 1


/* Other ring_buffer_type values that can be queried in the same way: */
RING_BUFFER_RESOURCE_MONITOR
RING_BUFFER_SCHEDULER_MONITOR
RING_BUFFER_MEMORY_BROKER
RING_BUFFER_SECURITY_ERROR
RING_BUFFER_XE_BUFFER_STATE
RING_BUFFER_SCHEDULER
RING_BUFFER_EXCEPTION
RING_BUFFER_CONNECTIVITY
RING_BUFFER_XE_LOG


select r.ring_buffer_address,
r.ring_buffer_type,
dateadd (ms, r.[timestamp] - sys.ms_ticks, getdate()) as record_time,
cast(r.record as xml) record
from sys.dm_os_ring_buffers r
cross join sys.dm_os_sys_info sys
where
ring_buffer_type='RING_BUFFER_RESOURCE_MONITOR'
order by 3 desc
Olu
New Member
09 Jun 2014 04:46 AM
SELECT ( SELECT occurrence FROM sys.dm_exec_query_optimizer_info
WHERE counter = 'hints' ) * 100.0 /
(
SELECT occurrence FROM sys.dm_exec_query_optimizer_info WHERE counter = 'optimizations'
)

The above query shows the percentage of optimizations that involved hints, which gives a sense of how flexible the system will be when it comes to upgrades etc.


Good article by Paul White on how the optimizer considers plans for parallelism, and how you can coax a query into producing a parallel plan.

Also discusses what to avoid if you want to be guaranteed a parallel plan (e.g. multi-statement table-valued functions, where the optimizer is unable to determine the number of rows).


http://sqlblog.com/blogs/paul_white...-plan.aspx
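
Before chasing plan shapes, it is also worth confirming the instance settings that gate whether a parallel plan is even considered; a minimal sketch:

SELECT name, value_in_use
FROM sys.configurations
WHERE name IN (N'cost threshold for parallelism', N'max degree of parallelism');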
Olu
New Member
09 Jun 2014 04:50 AM
Configuring soft-NUMA and processor settings using startup parameters

http://www.sqlskills.com/blogs/jona...soft-numa/