SQL Server Health Check v4.0

In the latest version of the SQL Server Health Check, I am trying to reduce the need to execute the entire script when only particular information is required. By setting a variable at the start, you can gather general informational output, resource usage details, or troubleshooting data — or you can still choose to execute the entire script. The syntax for executing the script from the command line is also provided. More information is in the header of the script.

--sql_server_health_check_v4.0.sql
--v4.0
--This is a script written to give an informational, resource usage, or troubleshooting
--view of a SQL Server instance for the purpose of a focused, purposeful review.
--The results of the queries are stored in temporary tables for the purpose of narrowing
--output based on Informational, Resource usage, Troubleshooting, or All of the above
--
--A parameter designated at the start of execution will generate results based on one of
--these three selections.
--Informational (I) - Sections 1,2,3,5,6,11,12,13,14,17,27,28,29,30
--Resource (R) - Sections 1,5,7,8,9,10,15,16
--Troubleshooting (T) - Sections 1,4,5,17,18,19,20,21,22,23,24,25,26,30
--All (A) - Sections 1-30
--
--Execution
--Ideally, the contents of this script can be pasted into a SQL Server Management Studio (SSMS)
--session, executed, and the output saved in a preferred format.
--However, if command-line execution is desired, set the execution type parameter in the script,
--then use the following execution format:
--sqlcmd -s <SERVER_NAME> -d master -i <SCRIPT_NAME> -U <LOGIN_USER> -P <LOGIN_PASSWORD> -o <OUTPUT_TEXTFILE>
--Omit the -o argument until any execution errors are resolved. 

/*
Information gathering sections

1-Node name
2-Product Version, level, and Edition
3-License information
4-Instance parameters
5-Database listing
6-Database startup time
7-Database uptime
8-Database data and log size with total
9-Database file size and growth settings
10-Database file size free space
11-Is this a Cluster Node?
12-Nodes in Cluster
13-Is AlwaysOn enabled (2012 and above)?
14-AlwaysOn status
15-Memory usage per database
16-Memory usage of in-memory OLTP tables
17-Last backup time per database
18-No log backups for FULL or BULK_LOGGED recovery model databases in last 30 days
--19-Databases with no backups at all in the last 30 days
20-Backups for the previous week per database
21-Jobs that failed in the last 24 hours
22-Missing indexes
23-Duplicate indexes
24-High index fragmentation check 
25-Tables that have not had a stats update in more than 30 days and have significant changes
26-Wait stats
27-Users and roles
28-Job information
29-Existing linked server listing
30-User statistics identification
*/

USE master
GO

--Execution type selector.
--Everything after the GO above runs as a single batch, so @ExecType stays
--in scope for every section below.
--Valid values (any other value results in no output at all):
--  I = Informational
--  R = Resource usage
--  T = Troubleshooting
--  A = All sections executed
DECLARE @ExecType VARCHAR(1) = 'R'

----------------------------------------------------------1-Node name

IF @ExecType IN ('I','R','T','A')
BEGIN
	--Physical NetBIOS name of the node currently hosting the instance
	--(useful on clusters to see which node is active).
	SELECT SERVERPROPERTY('ComputerNamePhysicalNetBIOS') AS [CurrentNodeName];
END



----------------------------------------------------------2-Product Version, level, and Edition

IF @ExecType IN ('I','A')
BEGIN
	--Build number, service-pack/CU level, and edition of the instance.
	SELECT SERVERPROPERTY('productversion') AS ProductVersion,
	       SERVERPROPERTY('productlevel') AS ProductLevel,
	       SERVERPROPERTY('edition') AS Edition;
END


----------------------------------------------------------3-License information

IF @ExecType IN ('I','A')
BEGIN
	print N'License information';

	--License type and count; NumLicenses is NULL on most modern editions,
	--so it is normalized to 0.
	SELECT SERVERPROPERTY('LicenseType') AS LICENSE_TYPE,
	       ISNULL(SERVERPROPERTY('NumLicenses'), 0) AS NUM_LICENCES;
END

----------------------------------------------------------4-Instance parameters

IF @ExecType IN ('T','A')
BEGIN
	print N'Instance Parameter';

	--Full dump of instance-level configuration (the sp_configure values),
	--alphabetized for easy comparison between servers.
	SELECT c.*
	FROM sys.configurations AS c
	ORDER BY c.name;
END

----------------------------------------------------------5-Database listing

IF @ExecType IN ('I','R','T','A')
BEGIN
	print N'Database list with Status and Recovery Model';

	--One row per database with its state (ONLINE/OFFLINE/...) and
	--recovery model; names truncated to keep text output readable.
	SELECT substring(d.name, 1, 40) AS name,
	       substring(d.state_desc, 1, 20) AS STATE,
	       substring(d.recovery_model_desc, 1, 20) AS RECOVERY_MODEL
	FROM sys.databases AS d
	ORDER BY d.name;
END

----------------------------------------------------------6-Database startup time

IF @ExecType in('I','A')
BEGIN
print N'Start time';

--Derives the server start time by subtracting sample_ms (a milliseconds-
--since-start counter exposed by the file-stats DMF for master, db 1 file 1)
--from the current time.
--NOTE(review): sample_ms is documented as ms since the *computer* started,
--which may differ from the instance start time -- confirm against
--sys.dm_os_sys_info.sqlserver_start_time on newer versions.
SELECT DATEADD(ms,-sample_ms,GETDATE() )AS StartTime
FROM sys.dm_io_virtual_file_stats(1,1);
END


----------------------------------------------------------7-Database uptime

IF @ExecType in('R','A')
BEGIN
print N'Up time';

--Instance uptime, taken from the login time of the system session (spid 1)
--and decomposed into years/days/hours/minutes/seconds.
DECLARE @server_start_time DATETIME,
@seconds_diff INT,
@years_online INT,
@days_online INT,
@hours_online INT,
@minutes_online INT,
@seconds_online INT ;

SELECT @server_start_time = login_time
FROM master.sys.sysprocesses
WHERE spid = 1 ;

--Successive division/remainder peels one unit at a time off the elapsed
--seconds (31536000 = 365-day year, 86400 = day, 3600 = hour).
SELECT @seconds_diff = DATEDIFF(SECOND, @server_start_time, GETDATE()),
@years_online = @seconds_diff / 31536000,
@seconds_diff = @seconds_diff % 31536000,
@days_online = @seconds_diff / 86400,
@seconds_diff = @seconds_diff % 86400,
@hours_online = @seconds_diff / 3600,
@seconds_diff = @seconds_diff % 3600,
@minutes_online = @seconds_diff / 60,
@seconds_online = @seconds_diff % 60 ;

SELECT @server_start_time AS server_start_time,
@years_online AS years_online,
@days_online AS days_online,
@hours_online AS hours_online,
@minutes_online AS minutes_online,
@seconds_online AS seconds_online ;

--FIX: the original repeated the full database listing (identical to section
--5, which runs for every execution type that reaches this section); the
--redundant copy-pasted query has been removed.
END

----------------------------------------------------------8-Database data and log size with total

IF @ExecType in('R','A')
BEGIN
print N'Data and Log Size with Total';

--Per-database data and log file sizes in MB (size is in 8 KB pages).
--Conditional aggregation over one join replaces the original's four
--correlated subqueries; a database with no file of a given type still
--yields NULL for that column, exactly as before.
SELECT
    db.name,
    SUM(CASE WHEN mf.type = 0 THEN mf.size * 8.0 / 1024 END) AS DataFileSizeMB,
    SUM(CASE WHEN mf.type = 1 THEN mf.size * 8.0 / 1024 END) AS LogFileSizeMB,
    SUM(CASE WHEN mf.type = 0 THEN mf.size * 8.0 / 1024 END)
      + SUM(CASE WHEN mf.type = 1 THEN mf.size * 8.0 / 1024 END) AS Total_MB
FROM sys.databases AS db
LEFT JOIN sys.master_files AS mf
    ON mf.database_id = db.database_id
GROUP BY db.database_id, db.name;

--Grand total of ALL data and log files as one value
SELECT SUM(size * 8.0) / 1024 AS Total_MB
FROM sys.master_files;
END

----------------------------------------------------------9-Database file size and growth settings

IF @ExecType in('R','A')
BEGIN
print N'Size and Growth';

--File size and autogrowth configuration per online database.
--FIX: the original read the deprecated sysaltfiles/sysdatabases views and
--guessed the growth mode with "growth > 100", which misclassifies files
--(e.g. a fixed growth of 50 pages, or a percent growth over 100).
--sys.master_files exposes is_percent_growth explicitly, so the growth mode
--and the computed growth-in-MB are now exact.
--max_size -1 (unlimited) or 268435456 (2 TB log cap) counts as unrestricted.
SELECT substring(d.name,1,40) AS DB_Name,
substring(mf.name,1,40) AS Logical_name,
substring(mf.physical_name,1,100) AS File_Name,
cast((mf.size * 8.00) / 1024 as numeric(12,2)) as DB_Size_in_MB,
case when mf.is_percent_growth = 0 then 'In MB' else 'In Percentage' end File_Growth,
cast(case when mf.is_percent_growth = 0 then (mf.growth * 8.00) / 1024
else (((mf.size * mf.growth) / 100) * 8.00) / 1024
end as numeric(12,2)) File_Growth_Size_in_MB,
case when ( mf.max_size = -1 or mf.max_size = 268435456 ) then 'AutoGrowth Not Restricted' else 'AutoGrowth Restricted' end AutoGrowth_Status
from sys.master_files mf
join sys.databases d on mf.database_id = d.database_id
where d.state_desc = 'ONLINE'
order by d.name;
END

----------------------------------------------------------10-Database file size free space

IF @ExecType in('R','A')
BEGIN
--File size and free space for the CURRENT database only.
--NOTE(review): at this point the script is running in master, so this
--reports master's files, not every database -- wrap in sp_MSforeachdb if
--per-database output is wanted.
--size is in 8 KB pages (size/128.0 = MB); the third argument (1) to ROUND
--truncates the percentage instead of rounding it.
SELECT DB_NAME() AS DbName, 
name AS FileName, 
size/128.0 AS CurrentSizeMB, 
size/128.0 - CAST(FILEPROPERTY(name, 'SpaceUsed') AS INT)/128.0 AS FreeSpaceMB,
round(100-((size/128.0 - CAST(FILEPROPERTY(name, 'SpaceUsed') AS INT)/128.0)/(size/128.0))*100,2,1) AS PCT_FREE
FROM sys.database_files; 
END

----------------------------------------------------------11-Is this a Cluster Node?

IF @ExecType IN ('I','A')
BEGIN
	--Reports whether this instance is part of a failover cluster.
	SELECT 'Clustered',
	       CASE WHEN SERVERPROPERTY('IsClustered') = 0 THEN 'No' ELSE 'Yes' END;
END

----------------------------------------------------------12-Nodes in Cluster

IF @ExecType in('I','A')
BEGIN
print N'Cluster Nodes';

--Lists the nodes of the failover cluster; returns an empty set on a
--non-clustered instance.
--NOTE(review): fn_virtualservernodes is a legacy function; newer versions
--expose sys.dm_os_cluster_nodes -- confirm before porting.
SELECT * FROM fn_virtualservernodes();
END

----------------------------------------------------------13-Is AlwaysOn enabled (2012 and above)?


IF @ExecType IN ('I','A')
BEGIN
	--Whether AlwaysOn Availability Groups are enabled (SQL 2012+).
	--On older versions IsHadrEnabled is NULL, which falls through to the
	--ELSE branch and is shown as-is.
	SELECT 'AlwaysOn',
	       CASE WHEN SERVERPROPERTY('IsHadrEnabled') = 0 THEN 'No'
	            WHEN SERVERPROPERTY('IsHadrEnabled') = 1 THEN 'Yes'
	            ELSE SERVERPROPERTY('IsHadrEnabled')
	       END;
END

----------------------------------------------------------14-AlwaysOn status

IF @ExecType in('I','A')
BEGIN
--AlwaysOn replica state, emitted with PRINT rather than as a result set.
--The existence check on the DMV keeps this section from failing on
--pre-2012 instances where it does not exist.
declare @c int;
declare @rd nvarchar(60); 
declare @osd nvarchar(60);
declare @rhd nvarchar(60); 
declare @shd nvarchar(60); 
declare @csd nvarchar(60);
select @c = COUNT(name) 
from sys.all_objects
where name = 'dm_hadr_availability_replica_states';
if @c = 0
print N'No AlwaysOn Status';
else
--NOTE(review): assigning a multi-row result to scalar variables keeps only
--one arbitrary row; with more than one availability replica, only a single
--replica's state is printed -- confirm whether that is intended.
select @rd = role_desc, @osd= case when operational_state_desc is null then 'Replica is not local'
                  else operational_state_desc end,
       @rhd = recovery_health_desc, @shd = synchronization_health_desc,
       @csd = connected_state_desc
from sys.dm_hadr_availability_replica_states;
--If the DMV did not exist, these variables are NULL and PRINT emits nothing.
print @rd
print @osd
print @rhd
print @shd
print @csd
END  


----------------------------------------------------------15-Memory usage per database

IF @ExecType in('R','A')
BEGIN
print N'Memory Usage per User Database';

--Buffer-pool pages cached per user database, converted from 8 KB pages to
--MB. System databases (id <= 4), the ResourceDb (32767), and SSISDB are
--excluded so only user workload shows up.
SELECT
    substring(DB_NAME(bd.database_id), 1, 40) AS [Database Name],
    COUNT(*) * 8 / 1024.0 AS [Cached Size (MB)]
FROM sys.dm_os_buffer_descriptors AS bd
WHERE bd.database_id > 4
  AND bd.database_id <> 32767
  AND DB_NAME(bd.database_id) <> 'SSISDB'
GROUP BY DB_NAME(bd.database_id)
ORDER BY [Cached Size (MB)] DESC OPTION (RECOMPILE);
END


----------------------------------------------------------16-Memory usage of in-memory OLTP tables

IF @ExecType in('R','A')
BEGIN
print N'In-memory OLTP table usage';

--Memory consumed by In-Memory OLTP tables in the current database,
--with the object name resolved alongside the raw DMV columns.
SELECT object_name(xtp.object_id) AS Name,
       xtp.*
FROM sys.dm_db_xtp_table_memory_stats AS xtp;
END

----------------------------------------------------------17-Last backup time per database

IF @ExecType in('I','T','A')
BEGIN
--Most recent backup of any kind per database; databases that have never
--been backed up show '-' thanks to the LEFT JOIN + COALESCE.
SELECT substring(d.Name, 1, 40) AS DatabaseName,
       COALESCE(CONVERT(VARCHAR(12), MAX(bs.backup_finish_date), 101), '-') AS LastBackUpTime
FROM sys.sysdatabases AS d
LEFT JOIN msdb.dbo.backupset AS bs
    ON bs.database_name = d.name
WHERE d.Name <> 'tempdb'
GROUP BY d.Name;
END

----------------------------------------------------------18-No log backups for FULL or BULK_LOGGED recovery model databases in last 30 days

IF @ExecType in('T','A')
BEGIN
print N'Databases with FULL or BULK_LOGGED recovery model and no log backups in last 30 days';

--A FULL/BULK_LOGGED database with no log backups faces unbounded log growth
--and loses point-in-time recovery.
--FIX: rewritten from NOT IN to NOT EXISTS. backupset.database_name is
--nullable, and a single NULL in a NOT IN list makes the whole query return
--zero rows, silently hiding every at-risk database. The join to
--backupmediafamily was dropped: it only multiplied rows, which is
--irrelevant to an existence check. The inconsistent "msdb..backupset"
--naming is also normalized to msdb.dbo.backupset.
SELECT d.name AS at_risk_database
FROM sys.databases d
WHERE d.recovery_model_desc in('FULL','BULK_LOGGED')
AND NOT EXISTS (
    SELECT 1
    FROM msdb.dbo.backupset bs
    WHERE bs.database_name = d.name
      AND bs.type = 'L' --log backups only
      AND CONVERT(datetime, bs.backup_start_date, 102) >= GETDATE() - 30
);
END

----------------------------------------------------------19-Databases with no backups at all in the last 30 days

IF @ExecType in('T','A')
BEGIN
print N'Databases with NO backups in last 30 days';

--Databases (other than tempdb) with no backup of any kind in 30 days.
--FIX: the original built the NOT IN list with substring(database_name,1,40),
--so any database whose name exceeds 40 characters could never match and was
--falsely reported as at risk. The truncation is removed and NOT IN is
--replaced with NOT EXISTS (immune to NULLs in backupset.database_name).
SELECT d.name AS at_risk_database
FROM sys.databases d
WHERE d.name <> 'tempdb'
AND NOT EXISTS (
    SELECT 1
    FROM msdb.dbo.backupset bs
    WHERE bs.database_name = d.name
      AND CONVERT(datetime, bs.backup_start_date, 102) >= GETDATE() - 30
);
END

----------------------------------------------------------20-Backups for the previous week per database

IF @ExecType in('T','A')
BEGIN
print N'All backups for previous week';

--One row per backup taken in the last 7 days, with the backup type decoded.
SELECT 
CONVERT(CHAR(40), SERVERPROPERTY('Servername')) AS Server, 
substring(msdb.dbo.backupset.database_name,1,40) AS DBName, 
msdb.dbo.backupset.backup_start_date, 
msdb.dbo.backupset.backup_finish_date, 
msdb.dbo.backupset.expiration_date, 
--FIX: the original ended with "WHEN NULL THEN msdb..backupset.type", which
--can never match -- a simple CASE compares with =, and NULL = NULL is not
--true. An ELSE branch now surfaces any unrecognized (or NULL) type code
--instead of silently yielding NULL. "msdb.." is normalized to msdb.dbo.
CASE msdb.dbo.backupset.type 
WHEN 'D' THEN 'Database' 
WHEN 'L' THEN 'Log'
WHEN 'F' THEN 'File'
WHEN 'P' THEN 'Partial'
WHEN 'I' THEN 'Differential database'
WHEN 'G' THEN 'Differential file'
WHEN 'Q' THEN 'Differential partial'
ELSE msdb.dbo.backupset.type 
END AS backup_type, 
msdb.dbo.backupset.backup_size, 
substring(msdb.dbo.backupmediafamily.logical_device_name,1,50) AS logical_device_name, 
substring(msdb.dbo.backupmediafamily.physical_device_name,1,50) AS physical_device_name, 
substring(msdb.dbo.backupset.name,1,50) AS backupset_name, 
substring(msdb.dbo.backupset.description,1,50) AS description 
FROM msdb.dbo.backupmediafamily 
INNER JOIN msdb.dbo.backupset ON msdb.dbo.backupmediafamily.media_set_id = msdb.dbo.backupset.media_set_id 
WHERE (CONVERT(datetime, msdb.dbo.backupset.backup_start_date, 102) >= GETDATE() - 7) 
ORDER BY 
msdb.dbo.backupset.database_name, 
msdb.dbo.backupset.backup_finish_date;
END

----------------------------------------------------------21-Jobs that failed in the last 24 hours

IF @ExecType in('T','A')
BEGIN
print N'Jobs Failing in last 24 hours';

--sysjobhistory stores run_date as an INT in YYYYMMDD form, so the cutoff is
--built directly with CONVERT style 112 (yyyymmdd).
--FIX: replaces the original's hand-assembled year/month/day strings (which
--relied on implicit varchar->int->varchar conversions for zero padding).
DECLARE @FinalDate INT
SET @FinalDate = CONVERT(INT, CONVERT(CHAR(8), DATEADD(dd, -1, GETDATE()), 112)) --Last 1 day

--Failed job steps since the cutoff, most recent first.
--FIX: run_date >= @FinalDate (the original used >, which skipped every
--failure dated yesterday and so did not cover a true 24-hour window).
--FIX: the history step name is now aliased History_Step; the original
--aliased two different columns as "Step", making the output ambiguous.
SELECT substring(j.[name],1,40) AS JOB,
substring(s.step_name,1,40) AS Step,
h.step_id,
substring(h.step_name,1,40) AS History_Step,
h.run_date,
h.run_time,
h.sql_severity,
substring(h.message,1,100) AS Message,
h.server
FROM msdb.dbo.sysjobhistory h
INNER JOIN msdb.dbo.sysjobs j
ON h.job_id = j.job_id
INNER JOIN msdb.dbo.sysjobsteps s
ON j.job_id = s.job_id AND h.step_id = s.step_id
WHERE h.run_status = 0 --Failure
AND h.run_date >= @FinalDate
ORDER BY h.instance_id DESC;
END

----------------------------------------------------------22-Missing indexes

IF @ExecType in('T','A')
BEGIN
print N'Missing Indexes';

--Missing-index suggestions from the optimizer's DMVs, restricted to the
--top 5000 groups by estimated impact (cost * impact * accesses).
SELECT substring(o.name, 1, 40) AS Name,
       (migs.avg_total_user_cost * migs.avg_user_impact) * (migs.user_seeks + migs.user_scans) AS Impact,
       mid.equality_columns,
       mid.inequality_columns,
       mid.included_columns
FROM sys.dm_db_missing_index_group_stats AS migs
INNER JOIN sys.dm_db_missing_index_groups AS mig
    ON migs.group_handle = mig.index_group_handle
INNER JOIN sys.dm_db_missing_index_details AS mid
    ON mig.index_handle = mid.index_handle
INNER JOIN sys.objects AS o WITH (nolock)
    ON mid.object_id = o.object_id
WHERE migs.group_handle IN (
    SELECT TOP (5000) group_handle
    FROM sys.dm_db_missing_index_group_stats WITH (nolock)
    ORDER BY (avg_total_user_cost * avg_user_impact) * (user_seeks + user_scans) DESC);
END

----------------------------------------------------------23-Duplicate indexes

IF @ExecType in('T','A')
BEGIN
print N'Duplicate Indexes';

--Finds indexes whose key-column list is a leading prefix of another index
--on the same table: the "included" index is redundant with the "including"
--one and is a candidate for removal.
--BUG FIX: @SCHEMANAME and @TABLENAME were declared but never assigned, so
--"LIKE NULL" evaluated to UNKNOWN and this section could never return a
--single row. Defaulting both to '%' restores the intended match-everything
--behavior while keeping them easy to narrow by hand.
DECLARE @SCHEMANAME VARCHAR(30) = '%';
DECLARE @TABLENAME VARCHAR(127) = '%';
--ind_list: one row per enabled non-XML index, with its key columns and
--included columns flattened into comma-separated strings via FOR XML PATH.
WITH ind_list AS(
  select o.schema_id, i.object_id, i.index_id,
    i.name, i.type_desc,
    i.is_unique, i.is_primary_key, 
    STUFF( (SELECT ',' + tc.name
            FROM sys.index_columns ic
              JOIN sys.columns tc
               ON tc.column_id = ic.column_id AND
                  tc.object_id = ic.object_id
            WHERE ic.object_id = i.object_id AND
                  ic.index_id = i.index_id 
              AND ic.is_included_column = 0
            ORDER BY ic.index_column_id
            FOR XML PATH ('') ),1,1,'' ) index_columns,
    STUFF( (SELECT ',' + tc.name
            FROM sys.index_columns ic
              JOIN sys.columns tc
               ON tc.column_id = ic.column_id AND
                  tc.object_id = ic.object_id
            WHERE ic.object_id = i.object_id AND
                  ic.index_id = i.index_id
               AND ic.is_included_column = 1
            ORDER BY ic.index_column_id
            FOR XML PATH ('') ),1,1,'' ) include_columns
  FROM sys.indexes i
    JOIN sys.objects o ON o.object_id = i.object_id
  WHERE i.index_id > 0 AND i.type_desc <> 'XML'
    AND object_name(i.object_id) LIKE @TABLENAME
    AND i.is_disabled = 0 
    AND schema_name(o.schema_id) LIKE @SCHEMANAME )
SELECT substring(schema_name(included_indexes.schema_id),1,30) AS owner, 
  object_name(included_indexes.object_id) table_name,
  (SELECT SUM(st.row_count) FROM sys.dm_db_partition_stats st
   WHERE st.object_id = included_indexes.object_id
     AND st.index_id < 2 ) num_rows,
  included_indexes.name included_index_name, 
  included_indexes.index_columns included_index_columns, 
  included_indexes.include_columns
       included_index_include_columns,
  included_indexes.type_desc included_index_type, 
  included_indexes.is_unique included_index_uniqueness,
  included_indexes.is_primary_key included_index_PK,
  (SELECT SUM(a.total_pages) * 8 FROM sys.allocation_units a
    JOIN sys.partitions p ON a.container_id = p.partition_id
   WHERE p.object_id = included_indexes.object_id AND
     p.index_id = included_indexes.index_id
  ) included_index_size_kb,
  including_indexes.name including_index_name, 
  including_indexes.index_columns including_index_columns, 
  including_indexes.include_columns
       including_index_include_columns,
  including_indexes.type_desc including_index_type, 
  including_indexes.is_unique including_index_uniqueness,
  including_indexes.is_primary_key including_index_PK,
  (SELECT SUM(a.total_pages) * 8 FROM sys.allocation_units a
     JOIN sys.partitions p ON a.container_id = p.partition_id
   WHERE p.object_id = including_indexes.object_id AND
    p.index_id = including_indexes.index_id
  ) including_index_size_kb
--Self-join: an index is redundant when its key list + ',' is a prefix of
--another index's key list on the same table.
FROM ind_list included_indexes
  JOIN ind_list including_indexes
    ON including_indexes.object_id = included_indexes.object_id
  JOIN sys.partitions ing_p 
   ON ing_p.object_id = including_indexes.object_id AND
      ing_p.index_id = including_indexes.index_id
  JOIN sys.allocation_units ing_a
   ON ing_a.container_id = ing_p.partition_id
WHERE including_indexes.index_id <> included_indexes.index_id
  AND LEN(included_indexes.index_columns) <=
      LEN(including_indexes.index_columns) 
  AND included_indexes.index_columns + ',' =
      SUBSTRING(including_indexes.index_columns,1,
              LEN(included_indexes.index_columns + ','))
ORDER BY 2 DESC;
END

----------------------------------------------------------24-High index fragmentation check 

IF @ExecType in('T','A')
BEGIN
print N'Index with HIGH Fragmentation';

--Runs a LIMITED-mode physical-stats scan in every database and reports
--indexes at or above 30% average fragmentation (the conventional
--rebuild threshold). Each database is a separate batch under
--sp_MSforeachdb, so the ORDER BY applies per database, not globally.
EXEC sp_MSforeachdb '
USE [?]
SELECT ''?'' AS DB_NAME,
QUOTENAME(sysind.name) AS [index_name],
indstat.*
FROM sys.dm_db_index_physical_stats (DB_ID(), NULL, NULL, NULL, ''LIMITED'')
AS indstat
INNER JOIN sys.indexes sysind ON indstat.object_id = sysind.object_id AND
indstat.index_id = sysind.index_id
where avg_fragmentation_in_percent >= 30
ORDER BY avg_fragmentation_in_percent DESC;
'
END

--Because of the sp_MSforeachdb routine above, this ensures a switch back to the master database
--(sp_MSforeachdb leaves the session in the last database it visited).
--@DBNAME is reused by the identical reset after section 25.
DECLARE @DBNAME varchar(128)
SELECT @DBNAME = DB_NAME()
IF @DBNAME <> 'master'
BEGIN
USE MASTER;
END

-----------------25-Tables that have not had a stats update in more than 30 days and have significant changes

IF @ExecType in('T','A')
BEGIN
--For every user database, lists user tables whose statistics look stale:
--not updated in 30+ days AND more than 1000 modifications since the last
--update. System databases are skipped inside the dynamic batch.
EXEC sp_MSforeachdb '
USE [?]
IF ''?'' <> ''master'' AND ''?'' <> ''model'' AND ''?'' <> ''msdb'' AND ''?'' <> ''tempdb''
SELECT ''?'' AS DB_NAME,obj.name, obj.object_id, stat.name, stat.stats_id, last_updated, modification_counter  
FROM sys.objects AS obj   
INNER JOIN sys.stats AS stat ON stat.object_id = obj.object_id  
CROSS APPLY sys.dm_db_stats_properties(stat.object_id, stat.stats_id) AS sp  
WHERE obj.type = ''U''
AND last_updated < getdate() - 30 
AND modification_counter > 1000
--OR last_updated IS NULL;
'
END

--Because of the sp_MSforeachdb routine above, this ensures a switch back to the master database
--@DBNAME was declared once after section 24 and is reused here, since the
--whole script runs as a single batch.
SELECT @DBNAME = DB_NAME()
IF @DBNAME <> 'master'
BEGIN
USE MASTER;
END

----------------------------------------------------------26-Wait stats

IF @ExecType in('T','A')
BEGIN
print N'Wait Stats';

--Cumulative waits since instance start (or the last stats clear); only
--wait types with more than 10 seconds of accumulated wait time are shown.
SELECT ws.*
FROM sys.dm_os_wait_stats AS ws
WHERE ws.wait_time_ms > 10000
ORDER BY ws.wait_time_ms DESC;
END

----------------------------------------------------------27-Users and roles

IF @ExecType in('I','A')
BEGIN
print N'Users and Roles';

--Maps each database principal to the roles it belongs to, with the
--principal type decoded.
--NOTE(review): sys.sysmembers is a deprecated compatibility view; the
--supported equivalent is sys.database_role_members -- confirm before
--porting to newer versions.
--Roles_CTE: one (role, member) pair per role membership in the current DB.
WITH Roles_CTE(Role_Name, Username)
AS
(
	SELECT 
		User_Name(sm.[groupuid]) as [Role_Name],
		user_name(sm.[memberuid]) as [Username]
	FROM [sys].[sysmembers] sm
)

SELECT  
    Roles_CTE.Role_Name,
    [DatabaseUserName] = princ.[name],
    [UserType] = CASE princ.[type]
                    WHEN 'S' THEN 'SQL User'
                    WHEN 'U' THEN 'Windows User'
                    WHEN 'G' THEN 'Windows Group'
                    WHEN 'A' THEN 'Application Role'
                    WHEN 'R' THEN 'Database Role'
                    WHEN 'C' THEN 'User mapped to a certificate'
                    WHEN 'K' THEN 'User mapped to an asymmetric key'
                 END
FROM 
    sys.database_principals princ 
JOIN Roles_CTE on Username = princ.name
where princ.type in ('S', 'U', 'G', 'A', 'R', 'C', 'K')
ORDER BY princ.name;
END

----------------------------------------------------------28-Job information

IF @ExecType in('I','A')
BEGIN
print N'Job Information';

--One row per SQL Agent job: owner, enabled/scheduled flags, a human-readable
--description of the schedule (Occurs / Occurs_detail / Frequency), the
--average historical duration in seconds, and the next scheduled run.
--freq_interval is a bitmask for weekly schedules (bit per weekday), hence
--the bitwise & tests; times are stored as integers (HHMMSS), hence the
--STUFF/RIGHT padding into hh:mm:ss strings.
SELECT	 [JobName] = [jobs].[name]
		,[Category] = [categories].[name]
		,[Owner] = SUSER_SNAME([jobs].[owner_sid])
		,[Enabled] = CASE [jobs].[enabled] WHEN 1 THEN 'Yes' ELSE 'No' END
		,[Scheduled] = CASE [schedule].[enabled] WHEN 1 THEN 'Yes' ELSE 'No' END
		,[Description] = [jobs].[description]
		,[Occurs] = 
				CASE [schedule].[freq_type]
					WHEN   1 THEN 'Once'
					WHEN   4 THEN 'Daily'
					WHEN   8 THEN 'Weekly'
					WHEN  16 THEN 'Monthly'
					WHEN  32 THEN 'Monthly relative'
					WHEN  64 THEN 'When SQL Server Agent starts'
					WHEN 128 THEN 'Start whenever the CPU(s) become idle' 
					ELSE ''
				END
		,[Occurs_detail] = 
				CASE [schedule].[freq_type]
					WHEN   1 THEN 'O'
					WHEN   4 THEN 'Every ' + CONVERT(VARCHAR, [schedule].[freq_interval]) + ' day(s)'
					WHEN   8 THEN 'Every ' + CONVERT(VARCHAR, [schedule].[freq_recurrence_factor]) + ' weeks(s) on ' + 
						LEFT(
							CASE WHEN [schedule].[freq_interval] &  1 =  1 THEN 'Sunday, '    ELSE '' END + 
							CASE WHEN [schedule].[freq_interval] &  2 =  2 THEN 'Monday, '    ELSE '' END + 
							CASE WHEN [schedule].[freq_interval] &  4 =  4 THEN 'Tuesday, '   ELSE '' END + 
							CASE WHEN [schedule].[freq_interval] &  8 =  8 THEN 'Wednesday, ' ELSE '' END + 
							CASE WHEN [schedule].[freq_interval] & 16 = 16 THEN 'Thursday, '  ELSE '' END + 
							CASE WHEN [schedule].[freq_interval] & 32 = 32 THEN 'Friday, '    ELSE '' END + 
							CASE WHEN [schedule].[freq_interval] & 64 = 64 THEN 'Saturday, '  ELSE '' END , 
							LEN(
								CASE WHEN [schedule].[freq_interval] &  1 =  1 THEN 'Sunday, '    ELSE '' END + 
								CASE WHEN [schedule].[freq_interval] &  2 =  2 THEN 'Monday, '    ELSE '' END + 
								CASE WHEN [schedule].[freq_interval] &  4 =  4 THEN 'Tuesday, '   ELSE '' END + 
								CASE WHEN [schedule].[freq_interval] &  8 =  8 THEN 'Wednesday, ' ELSE '' END + 
								CASE WHEN [schedule].[freq_interval] & 16 = 16 THEN 'Thursday, '  ELSE '' END + 
								CASE WHEN [schedule].[freq_interval] & 32 = 32 THEN 'Friday, '    ELSE '' END + 
								CASE WHEN [schedule].[freq_interval] & 64 = 64 THEN 'Saturday, '  ELSE '' END 
							) - 1
						)
					WHEN  16 THEN 'Day ' + CONVERT(VARCHAR, [schedule].[freq_interval]) + ' of every ' + CONVERT(VARCHAR, [schedule].[freq_recurrence_factor]) + ' month(s)'
					WHEN  32 THEN 'The ' + 
							CASE [schedule].[freq_relative_interval]
								WHEN  1 THEN 'First'
								WHEN  2 THEN 'Second'
								WHEN  4 THEN 'Third'
								WHEN  8 THEN 'Fourth'
								WHEN 16 THEN 'Last' 
							END +
							CASE [schedule].[freq_interval]
								WHEN  1 THEN ' Sunday'
								WHEN  2 THEN ' Monday'
								WHEN  3 THEN ' Tuesday'
								WHEN  4 THEN ' Wednesday'
								WHEN  5 THEN ' Thursday'
								WHEN  6 THEN ' Friday'
								WHEN  7 THEN ' Saturday'
								WHEN  8 THEN ' Day'
								WHEN  9 THEN ' Weekday'
								WHEN 10 THEN ' Weekend Day' 
							END + ' of every ' + CONVERT(VARCHAR, [schedule].[freq_recurrence_factor]) + ' month(s)' 
					ELSE ''
				END
		,[Frequency] = 
				CASE [schedule].[freq_subday_type]
					WHEN 1 THEN 'Occurs once at ' + 
								STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [schedule].[active_start_time]), 6), 5, 0, ':'), 3, 0, ':')
					WHEN 2 THEN 'Occurs every ' + 
								CONVERT(VARCHAR, [schedule].[freq_subday_interval]) + ' Seconds(s) between ' + 
								STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [schedule].[active_start_time]), 6), 5, 0, ':'), 3, 0, ':') + ' and ' + 
								STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [schedule].[active_end_time]), 6), 5, 0, ':'), 3, 0, ':')
					WHEN 4 THEN 'Occurs every ' + 
								CONVERT(VARCHAR, [schedule].[freq_subday_interval]) + ' Minute(s) between ' + 
								STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [schedule].[active_start_time]), 6), 5, 0, ':'), 3, 0, ':') + ' and ' + 
								STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [schedule].[active_end_time]), 6), 5, 0, ':'), 3, 0, ':')
					WHEN 8 THEN 'Occurs every ' + 
								CONVERT(VARCHAR, [schedule].[freq_subday_interval]) + ' Hour(s) between ' + 
								STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [schedule].[active_start_time]), 6), 5, 0, ':'), 3, 0, ':') + ' and ' + 
								STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [schedule].[active_end_time]), 6), 5, 0, ':'), 3, 0, ':')
					ELSE ''
				END
		,[AvgDurationInSec] = CONVERT(DECIMAL(10, 2), [jobhistory].[AvgDuration])
		,[Next_Run_Date] = 
				CASE [jobschedule].[next_run_date]
					WHEN 0 THEN CONVERT(DATETIME, '1900/1/1')
					ELSE CONVERT(DATETIME, CONVERT(CHAR(8), [jobschedule].[next_run_date], 112) + ' ' + 
						 STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [jobschedule].[next_run_time]), 6), 5, 0, ':'), 3, 0, ':'))
				END
FROM	 [msdb].[dbo].[sysjobs] AS [jobs] WITh(NOLOCK) 
		 LEFT OUTER JOIN [msdb].[dbo].[sysjobschedules] AS [jobschedule] WITh(NOLOCK) 
				 ON [jobs].[job_id] = [jobschedule].[job_id] 
		 LEFT OUTER JOIN [msdb].[dbo].[sysschedules] AS [schedule] WITh(NOLOCK) 
				 ON [jobschedule].[schedule_id] = [schedule].[schedule_id] 
		 INNER JOIN [msdb].[dbo].[syscategories] [categories] WITh(NOLOCK) 
				 ON [jobs].[category_id] = [categories].[category_id] 
		 LEFT OUTER JOIN 
			--Average duration per job over all history rows; run_duration is
			--an HHMMSS integer, decoded to seconds before averaging.
					(	SELECT	 [job_id], [AvgDuration] = (SUM((([run_duration] / 10000 * 3600) + 
																(([run_duration] % 10000) / 100 * 60) + 
																 ([run_duration] % 10000) % 100)) * 1.0) / COUNT([job_id])
						FROM	 [msdb].[dbo].[sysjobhistory] WITh(NOLOCK)
						WHERE	 [step_id] = 0 
						GROUP BY [job_id]
					 ) AS [jobhistory] 
				 ON [jobhistory].[job_id] = [jobs].[job_id];
END

----------------------------------------------------------29-Existing linked server listing

IF @ExecType in('I','A')
BEGIN
print N'Linked Server Information';

--Linked servers with their login mappings. sys.servers always contains the
--local server as server_id = 0, which is excluded. The existence check on
--the 'Servers' catalog object falls back to sp_linkedservers on versions
--where sys.servers is not available.
declare @x int;
select @x = COUNT(name) 
from sys.all_objects
where name = 'Servers';
if @x <> 0
SELECT * 
--c.name, provider, data_source, is_remote_login_enabled, b.modify_date
FROM sys.Servers a
LEFT OUTER JOIN sys.linked_logins b ON b.server_id = a.server_id
LEFT OUTER JOIN sys.server_principals c ON c.principal_id = b.local_principal_id
where a.server_id <> 0;
else
exec sp_linkedservers;
END

----------------------------------------------------------30-User statistics identification

IF @ExecType IN ('I', 'T', 'A')
BEGIN
PRINT N'User statistics';

-- Explicitly user-created statistics objects on non-system schemas.
SELECT st.name AS STATS_NAME,
       SCHEMA_NAME(obj.schema_id) AS SCHEMA_NAME,
       OBJECT_NAME(st.object_id) AS TABLE_NAME
FROM sys.stats AS st
INNER JOIN sys.objects AS obj
        ON obj.object_id = st.object_id
WHERE st.user_created = 1
  AND st.auto_created = 0
  AND SCHEMA_NAME(obj.schema_id) <> 'sys';
END

Docker for Windows – Getting started

I have been looking at using Docker as an alternative to virtual machines (VMs) for running Oracle and SQL Server databases; SQL Server in particular. Although I have many saved VMs for various versions of SQL Server, after I boot them up, I need to install Windows updates, especially if it has been some time since the last update. I realize that I don’t have to install these updates, but feel compelled from working with Windows as my primary OS over the years.

Anyway, the update process can take a long time, and I then want to back up the newly-updated VM, replacing the older pre-updated version. This is a lot of effort to go through, especially if I just want to test a simple script.

Enter Docker. “Docker is an open platform for developing, shipping, and running applications. Docker provides the ability to package and run an application in a loosely isolated environment called a container. The isolation and security allow you to run many containers simultaneously on a given host. Containers are lightweight and contain everything needed to run the application, so you do not need to rely on what is currently installed on the host. You can easily share containers while you work, so that everyone you share with gets the same container that works in the same way.”

Sounds good. An open, light-weight environment that does not rely (much) on the underlying host. VMs do allow more isolation, but are large and difficult to share compared to Docker containers. Docker appears to be an attractive alternative for simple tasks like script testing, but probably not for creating HA test environments such as Oracle RAC and SQL Server Always On Availability Groups.

With this in mind, I found an article on the Docker Desktop for Windows utility and thought this would be a good starting point. Along the way, I will share a big problem with the latest version of the Docker Desktop and the step to fix it.

First, you need to go here to download and install the Docker Desktop for Windows. You will need admin privileges on the host and either Windows 10 64-bit Home or Pro 2004 (build 19041) or higher, or Enterprise or Education 1909 (build 18363) or higher for this installation. If you are unsure of your Windows version, you can go to the Windows control panel -> System -> About. Scroll down until you see the Windows specifications


Although you can install Docker Desktop on Windows 10 Home edition, you cannot run Windows-type containers unless you have Windows Pro or Enterprise versions AND you are a member of the Administrators group on the host. Company-owned laptops typically do not allow this unless a special request is approved.

After downloading the installer executable file and running it, you will be presented with the install options.

On this screen, you can determine if you want to install the required Windows Subsystem for Linux (WSL) components as well as creating a desktop shortcut. If you have the WSL version 2 components already installed, then you can uncheck this box and continue by clicking OK.

After a short install process, during which you may need to reboot, the Docker Desktop is installed.

Opening the Docker Desktop for the first time will automatically start the Docker Engine.

If you see the following screen, follow the directions and link to install the WSL 2 kernel update.

Now for the problem I mentioned earlier. You may see this error screen when the Docker Engine starts.

If you see this error, click the Quit option to exit the Docker Desktop, then use Windows Explorer to locate the panic.log file under the C:\Users\All Users\Docker folder and remove the read-only designation. The Docker Engine should start without errors the next time you open the Docker Desktop.

The Docker Engine is configured to run Linux containers by default, but we need to switch to Windows containers for this demonstration. You can switch containers by right-clicking on the Docker icon in the taskbar tray and selecting “Switch to Windows containers…”. The Docker Engine will restart after the switch. Repeat these steps to switch back to Linux containers.

This screen concerning a disabled containers feature may appear. Simply follow the directions and execute the command from a PowerShell command prompt, connected as system administrator

If you receive an “access denied” error while executing the PowerShell command, you will need to go to the Control Panel and enable the Windows features Containers and Hyper-V. A reboot will be required after these features are enabled.

While switching to Windows containers, if you see the same error screen that I showed you earlier when the Docker Engine started, just repeat the process regarding the panic.log file. In my case, the Docker Engine started fine after installation, but the error appeared while switching to Windows containers. Days later, and after several reboots, the error appeared when starting the Docker Engine. I don’t think this is a permissions issue because I have full admin privileges and was able to remove the read only aspect of the panic.log without having to acknowledge an admin rights action.

Windows containers by default use ephemeral storage. All container I/O happens in a “scratch space” and each container gets its own space. File creation and file writes are captured in the scratch space and do not escape to the host. When a container instance is stopped, all changes that occurred in the scratch space are thrown away. When a new container instance is started, a new scratch space is provided for the instance. Keep this in mind when making changes to the containers as these changes may not appear the next time the container is started.

Container images are a bundle of files expressed as a series of layers. Layer storage is all the files that are built into the container. In a default installation, layers are stored in C:\ProgramData\Docker and split across the “image” and “windowsfilter” directories. You can change where the layers are stored using the docker-root configuration, as demonstrated in the Docker Engine on Windows documentation here. Now that Docker is installed and running, we can download a SQL Server 2017 container for testing. An assortment of free containers are available through the Docker Hub here. A free registration is required to download these containers. The process of downloading a Docker container is called a pull and when selecting a container through the Docker Hub, instructions are given to execute the pull of the particular container from the command line. The command line is simply a DOS command prompt. You can search for containers from the command line as well, but the results may appear a bit cryptic.

From the command line, execute the command docker pull microsoft/mssql-server-windows-developer:2017-latest to pull down the SQL Server image. It may take some time to download due to its size and the speed of your internet connection.

Once the container is downloaded, execute the following command to configure the container. The breakdown of this command is as follows:

-p 1433:1433 – This is the port to access the container and the one it will be listening on. Port 1433 is the default port for SQL Server services.

--name sql-win – The name of the container. This name is entirely up to the owner of the container.

sa_password=Semperf1 – The password for the SQL Server sa (system administrator) account. Password can be set to a container owner preferred value.

-v c:\Users\dba_j\Desktop\Temp\containerDB:c:\data – The first part of the path is a location on the host and the second part is the location in the container that equates to the host location. Files written to this location persist even after the removal of the container.

docker run -d -p 1433:1433 --name sql-win -e sa_password=Semperf1 -e ACCEPT_EULA=Y -e "MSSQL_AGENT_ENABLED=true" -v c:\Users\dba_j\Desktop\Temp\containerDB:c:\data microsoft/mssql-server-windows-developer:2017-latest

The following commands can be used to start, stop, and remove the container:

docker start <container_name> (In this case sql-win)

docker stop <container_name>

docker rm <container_name>

Start the container with the start command. You can see it running through the Docker Desktop for Windows app or by executing docker ps -a from the command line.

Download the SQL Server Management Studio (SSMS) from here and start it. You will see the connection window on top of the SSMS interface. Since we are using the default port for the connection, you should just need to supply the sa account password that was set in the container setup command.

The result should be a successful connection to the container database through SSMS.

Since there are no user databases available in the default container, you can download different versions of the Microsoft AdventureWorks sample database here. These downloads come in the form of a SQL Server backup file (.bak) and can be recovered into the container database through SSMS as you would a regular database

If you need to change any of the settings of the container, like the sa password, you can stop, remove, then re-execute the setup command with the changed values.

That’s it. You now have a working Docker container hosting environment. Just remember to switch between Linux and Windows container modes as needed.

Enjoy!

Windows cron?

As anyone who has been in the Database Administrator business for some time knows, most Unix-type operating systems come with a robust but sometimes cryptic utility called cron.

No, but close

Crom, I mean cron is used to schedule the automatic execution of scripts or programs on the Unix/Linux server.  It is often used to schedule backups or perform maintenance for Oracle databases created on the server.  Combining this utility with the flexibility of shell scripts is great for shops where a centralized management utility is absent.

If you are looking at Windows for a similar cron/script combination like Unix/Linux, then the Task Scheduler and basic batch files are available.  You can also use Powershell scripts for both Windows and Unix/Linux environments, but we will concentrate on a simple batch file example this time.

To open the Task Scheduler utility, press and hold the Windows key, then press the “s” key to open the Windows Search utility.

Windows button, in case you did not know

Once the search utility is open, start typing the word task and the Task Manager and Task Scheduler utilities should be listed.  Click on Task Scheduler and the utility appears.

We are going to pause here and create two simple batch files for stopping/disabling and enabling/starting a Windows service.  The purpose is to stop this service over a weekend and disable it at the same time so that a server reboot does not automatically start the service before we want it started.  To create these batch files, open a text file editor such as Notepad, copy and paste the lines below, and save the files as stop.bat and start.bat

Contents of the stop.bat batch file:

@echo off
net stop PrintService
sc config PrintService start=disabled
exit

Contents of the start.bat batch file:

@echo off
sc config PrintService start=auto
net start PrintService
exit

Now that we have our batch files created, it is time to schedule them for execution.  Go back to the Task Scheduler, pull down the Action menu at the top, and select Create Task.

In this General screen, give the task an informative name and a short description of what the task will do.

It is important to note the Security options here.  By default, this new task will execute as the account you are running the Task Scheduler under.  You can select a different account, but you MUST have the credentials for this account, especially if you want to automate the task to execute even if that account is not currently connected by selecting the “Run whether user is logged on or not” radio button.

Use caution when an actual user account is selected, as the task will fail if that account is disabled for some reason such as employee departure.  If this task is something that will execute for some time to come, consider creating a local account on the Windows server to own the task.

Whichever option you choose, make sure the account that owns the task has sufficient privileges to execute the commands in the batch files.  In this case, administrator privileges are required to manipulate Windows services.

After completing the information in this screen, go to the Triggers tab and click the New button.

This is where we will schedule the task to execute.  You have one time, daily, weekly, and monthly options.  The rest of this screen is fairly self-explanatory.  At the bottom of the screen is an Enabled option that will allow you to schedule the task, but not have it enabled to run yet.

After completing the information in this screen, go to the Actions tab and click the New button.

Here you will designate the action this new task will perform.  In later versions of this utility, only the “Start a program” action is available in the Action drop-down list, which is fine for our example.

Browse to the location of the stop.bat file and select it.  Click OK once the batch file is selected.  You can review the settings under the Conditions and Settings tabs, but for our example, the default settings are sufficient.

Click the OK button in the Create Task window once you are done.  If you selected to have this new task execute whether the executing account is logged on or not, you will be prompted for the password for this account, which the Task Scheduler will store.  If the password changes for this account, the tasks associated with the account will fail until the stored passwords are updated in the tasks.

Repeat this process to create a task for the start.bat batch file and you are all set.

Enjoy.

SQL Server Health Check v3.1

My main background has been Oracle databases and I have built my SQL Server knowledge over the years to be of assistance in situations where both Oracle and SQL Server databases are present.

My SQL Server Health Check script has been my attempt to distill my SQL Server knowledge into something that I can use to review a new SQL Server database that I have never worked with.

Sometimes we overlook the simplest things because we take for granted that they are already present and fine.  This is not always the case, obviously.  The latest iteration of my script has the addition of reporting tables in user databases that have not had statistics updated in the last 30 days and have significant changes since that time.

 

--sql_server_health_check_v3.1.sql
--v3.1
--This is a non-intrusive script written to give an overall informational view of a 
--SQL Server 2005-2016(CTP3) server instance for the purpose of first-time review.
--
--Version 3.1 additions
--Tables that have not had a stats update in more than 30 days and have significant changes

/*
Information gathered
-Node name
-License information
-Product Version, level, and Edition
-Instance parameters
-Start time and up time
-Database listing
-Database uptime
-Database data and log size with total
-Database file size and growth settings
-Database file size free space
-Is this a Cluster Node?
-Nodes in Cluster
-Is AlwaysOn enabled (2012 and above)?
-AlwaysOn status
-Memory usage per database
-Memory usage of in-memory OLTP tables
-Last backup time per database
-No log backups for FULL or BULK_LOGGED recovery model databases in last 30 days
-Databases with no backups at all in the last 30 days
-Backups for the previous week per database
-Jobs that failed in the last 24 hours
-Missing indexes
-Duplicate indexes
-High index fragmentation check 
-Tables that have not had a stats update in more than 30 days and have significant changes
-Wait stats
-Users and roles
-Job information
-Existing linked server listing
-User statistics identification

*/


use master
GO


----------------------------------------------------------Node name

-- Physical NetBIOS name of the node currently hosting the instance
-- (identifies the active node on a failover cluster).
SELECT SERVERPROPERTY('ComputerNamePhysicalNetBIOS') AS [CurrentNodeName];


----------------------------------------------------------Product Version, level, and Edition

SELECT SERVERPROPERTY('productversion') AS ProductVersion,
       SERVERPROPERTY('productlevel') AS ProductLevel,
       SERVERPROPERTY('edition') AS Edition;

----------------------------------------------------------License information

PRINT N'License information';

SELECT SERVERPROPERTY('LicenseType') AS LICENSE_TYPE,
       ISNULL(SERVERPROPERTY('NumLicenses'), 0) AS NUM_LICENCES;

----------------------------------------------------------Instance parameters

PRINT N'Instance Parameter';

-- Every instance-level configuration option (the sp_configure values).
SELECT *
FROM sys.configurations
ORDER BY name;

----------------------------------------------------------Database listing

PRINT N'Database list with Status and Recovery Model';

SELECT SUBSTRING(name, 1, 40) AS name,
       SUBSTRING(state_desc, 1, 20) AS STATE,
       SUBSTRING(recovery_model_desc, 1, 20) AS RECOVERY_MODEL
FROM sys.databases
ORDER BY name;

----------------------------------------------------------Database startup time

PRINT N'Start time';

-- Instance start time inferred from the millisecond I/O sample counter of
-- master's data file (database_id 1, file_id 1).
-- NOTE(review): sample_ms is an int of milliseconds since startup, so this
-- arithmetic wraps after roughly 24.9 days of uptime -- confirm acceptable.
SELECT DATEADD(ms, -sample_ms, GETDATE()) AS StartTime
FROM sys.dm_io_virtual_file_stats(1, 1);


----------------------------------------------------------Database start time uptime

print N'Up time';


DECLARE @server_start_time DATETIME,
@seconds_diff INT,
@years_online INT,
@days_online INT,
@hours_online INT,
@minutes_online INT,
@seconds_online INT ;

-- spid 1 logs in when the instance starts, so its login_time is the
-- instance start time.
SELECT @server_start_time = login_time
FROM master.sys.sysprocesses
WHERE spid = 1 ;

-- Break the total uptime in seconds down into years/days/hours/minutes/
-- seconds. The assignments evaluate left to right, each step reusing the
-- remainder left in @seconds_diff by the previous step -- do not reorder.
SELECT @seconds_diff = DATEDIFF(SECOND, @server_start_time, GETDATE()),
@years_online = @seconds_diff / 31536000,
@seconds_diff = @seconds_diff % 31536000,
@days_online = @seconds_diff / 86400,
@seconds_diff = @seconds_diff % 86400,
@hours_online = @seconds_diff / 3600,
@seconds_diff = @seconds_diff % 3600,
@minutes_online = @seconds_diff / 60,
@seconds_online = @seconds_diff % 60 ;

SELECT @server_start_time AS server_start_time,
@years_online AS years_online,
@days_online AS days_online,
@hours_online AS hours_online,
@minutes_online AS minutes_online,
@seconds_online AS seconds_online ;


-- NOTE(review): this repeats the "Database listing" query that already ran
-- earlier in the script -- looks like an accidental duplicate.
SELECT substring(name,1,40) AS name,  substring(state_desc,1,20) AS STATE, 
substring(recovery_model_desc,1,20) AS RECOVERY_MODEL
   FROM sys.databases
order by name;

----------------------------------------------------------Database data and log size with total

PRINT N'Data and Log Size with Total';

-- Per-database data (type 0) and log (type 1) file sizes in MB
-- (size is stored in 8 KB pages).
WITH fs AS
(
    SELECT database_id,
           type,
           size * 8.0 / 1024 AS size
    FROM sys.master_files
)
SELECT name,
       (SELECT SUM(size) FROM fs WHERE type = 0 AND fs.database_id = db.database_id) AS DataFileSizeMB,
       (SELECT SUM(size) FROM fs WHERE type = 1 AND fs.database_id = db.database_id) AS LogFileSizeMB,
       (SELECT SUM(size) FROM fs WHERE type = 0 AND fs.database_id = db.database_id) +
       (SELECT SUM(size) FROM fs WHERE type = 1 AND fs.database_id = db.database_id) AS Total_MB
FROM sys.databases AS db;

--Grand total of ALL data and log files as one value
SELECT SUM(size * 8.0) / 1024 AS Total_MB
FROM sys.master_files;


----------------------------------------------------------Database file size and growth settings

PRINT N'Size and Growth';

-- File-by-file size and autogrowth settings for ONLINE databases.
-- A growth value above 100 is treated as a page count (fixed-MB growth);
-- otherwise growth is interpreted as a percentage of the current size.
SELECT SUBSTRING(b.name, 1, 40) AS DB_Name,
       SUBSTRING(a.name, 1, 40) AS Logical_name,
       SUBSTRING(a.filename, 1, 100) AS File_Name,
       CAST((a.size * 8.00) / 1024 AS NUMERIC(12, 2)) AS DB_Size_in_MB,
       CASE WHEN a.growth > 100 THEN 'In MB' ELSE 'In Percentage' END AS File_Growth,
       CAST(CASE WHEN a.growth > 100 THEN (a.growth * 8.00) / 1024
                 ELSE (((a.size * a.growth) / 100) * 8.00) / 1024
            END AS NUMERIC(12, 2)) AS File_Growth_Size_in_MB,
       CASE WHEN (maxsize = -1 OR maxsize = 268435456)
            THEN 'AutoGrowth Not Restricted'
            ELSE 'AutoGrowth Restricted'
       END AS AutoGrowth_Status
FROM sysaltfiles AS a
JOIN sysdatabases AS b
  ON a.dbid = b.dbid
WHERE DATABASEPROPERTYEX(b.name, 'status') = 'ONLINE'
ORDER BY b.name;

----------------------------------------------------------Database file size free space

-- Size and free space of each file in the CURRENT database only
-- (sys.database_files is database-scoped; the script runs in master).
SELECT DB_NAME() AS DbName,
       name AS FileName,
       size / 128.0 AS CurrentSizeMB,
       size / 128.0 - CAST(FILEPROPERTY(name, 'SpaceUsed') AS INT) / 128.0 AS FreeSpaceMB,
       ROUND(100 - ((size / 128.0 - CAST(FILEPROPERTY(name, 'SpaceUsed') AS INT) / 128.0) / (size / 128.0)) * 100, 2, 1) AS PCT_FREE
FROM sys.database_files;


----------------------------------------------------------Is this a Cluster Node?

SELECT 'Clustered', case when SERVERPROPERTY('IsClustered') = 0 then 'No'
else 'Yes' end;

----------------------------------------------------------Nodes in Cluster

print N'Cluster Nodes';

-- Lists the nodes of a failover cluster; returns no rows on a
-- stand-alone server.
SELECT * FROM fn_virtualservernodes();


----------------------------------------------------------Is AlwaysOn enabled (2012 and above)?


SELECT 'AlwaysOn', case when SERVERPROPERTY('IsHadrEnabled') = 0 then 'No'
                        when SERVERPROPERTY('IsHadrEnabled') = 1 then 'Yes'
                        else SERVERPROPERTY('IsHadrEnabled') end;


----------------------------------------------------------AlwaysOn status

-- Only query the AlwaysOn DMV when it exists on this version/edition;
-- referencing it directly would fail to compile on older servers.
declare @c int;
declare @rd nvarchar(60); 
declare @osd nvarchar(60);
declare @rhd nvarchar(60); 
declare @shd nvarchar(60); 
declare @csd nvarchar(60);
select @c = COUNT(name) 
from sys.all_objects
where name = 'dm_hadr_availability_replica_states';
if @c = 0
print N'No AlwaysOn Status';
else
-- NOTE(review): if the DMV returns several replica rows, only the values
-- of the last row read are kept in the variables.
select @rd = role_desc, @osd= case when operational_state_desc is null then 'Replica is not local'
                  else operational_state_desc end,
       @rhd = recovery_health_desc, @shd = synchronization_health_desc,
       @csd = connected_state_desc
from sys.dm_hadr_availability_replica_states;
-- These PRINTs run in every case; when the DMV is absent the variables
-- stay NULL and the PRINTs emit blank lines.
print @rd
print @osd
print @rhd
print @shd
print @csd
  
	

----------------------------------------------------------Memory usage per database

PRINT N'Memory Usage per User Database';

-- Buffer pool pages cached per user database (8 KB pages -> MB).
-- Excludes the system databases (id <= 4), the ResourceDB marker (32767)
-- and SSISDB.
SELECT SUBSTRING(DB_NAME(database_id), 1, 40) AS [Database Name],
       COUNT(*) * 8 / 1024.0 AS [Cached Size (MB)]
FROM sys.dm_os_buffer_descriptors
WHERE database_id > 4
  AND database_id <> 32767
  AND DB_NAME(database_id) <> 'SSISDB'
GROUP BY DB_NAME(database_id)
ORDER BY [Cached Size (MB)] DESC
OPTION (RECOMPILE);

----------------------------------------------------------Memory usage of in-memory OLTP tables

PRINT N'In-memory OLTP table usage';

SELECT OBJECT_NAME(object_id) AS Name,
       *
FROM sys.dm_db_xtp_table_memory_stats;


----------------------------------------------------------Last backup time per database

-- Most recent backup of any type per database; '-' when never backed up.
SELECT SUBSTRING(sdb.Name, 1, 40) AS DatabaseName,
       COALESCE(CONVERT(VARCHAR(12), MAX(bus.backup_finish_date), 101), '-') AS LastBackUpTime
FROM sys.sysdatabases AS sdb
LEFT OUTER JOIN msdb.dbo.backupset AS bus
             ON bus.database_name = sdb.name
WHERE sdb.Name <> 'tempdb'
GROUP BY sdb.Name;


----------------------------------------------------------No log backups for FULL or BULK_LOGGED recovery model databases in last 30 days


print N'Databases with FULL or BULK_LOGGED recovery model and no log backups in last 30 days';


-- Databases whose recovery model requires log backups but that have no
-- log ('L') backup recorded in msdb within the last 30 days.
SELECT name AS at_risk_database
   FROM sys.databases
where recovery_model_desc in('FULL','BULK_LOGGED')
and name not in(
SELECT 
msdb.dbo.backupset.database_name AS DBName 
FROM msdb.dbo.backupmediafamily 
INNER JOIN msdb.dbo.backupset ON msdb.dbo.backupmediafamily.media_set_id = msdb.dbo.backupset.media_set_id 
WHERE (CONVERT(datetime, msdb.dbo.backupset.backup_start_date, 102) >= GETDATE() - 30)
and msdb..backupset.type = 'L'
group by msdb.dbo.backupset.database_name
);


----------------------------------------------------------Databases with no backups at all in the last 30 days

print N'Databases with NO backups in last 30 days';

SELECT name AS at_risk_database
   FROM sys.databases
where name <> 'tempdb'
and name not in(
SELECT 
substring(msdb.dbo.backupset.database_name,1,40) AS DBName 
FROM msdb.dbo.backupmediafamily 
INNER JOIN msdb.dbo.backupset ON msdb.dbo.backupmediafamily.media_set_id = msdb.dbo.backupset.media_set_id 
WHERE (CONVERT(datetime, msdb.dbo.backupset.backup_start_date, 102) >= GETDATE() - 30)
group by msdb.dbo.backupset.database_name
);


----------------------------------------------------------Backups for the previous week per database

print N'All backups for previous week';

SELECT 
CONVERT(CHAR(40), SERVERPROPERTY('Servername')) AS Server, 
substring(msdb.dbo.backupset.database_name,1,40) AS DBName, 
msdb.dbo.backupset.backup_start_date, 
msdb.dbo.backupset.backup_finish_date, 
msdb.dbo.backupset.expiration_date, 
-- Translate the one-character backup type code to a description.
-- NOTE(review): the WHEN NULL branch never matches in a simple CASE
-- (NULL is never equal to NULL); unmatched types simply come out NULL.
CASE msdb..backupset.type 
WHEN 'D' THEN 'Database' 
WHEN 'L' THEN 'Log'
WHEN 'F' THEN 'File'
WHEN 'P' THEN 'Partial'
WHEN 'I' THEN 'Differential database'
WHEN 'G' THEN 'Differential file'
WHEN 'Q' THEN 'Differential partial'
WHEN NULL THEN msdb..backupset.type 
END AS backup_type, 
msdb.dbo.backupset.backup_size, 
substring(msdb.dbo.backupmediafamily.logical_device_name,1,50) AS logical_device_name, 
substring(msdb.dbo.backupmediafamily.physical_device_name,1,50) AS physical_device_name, 
substring(msdb.dbo.backupset.name,1,50) AS backupset_name, 
substring(msdb.dbo.backupset.description,1,50) AS description 
FROM msdb.dbo.backupmediafamily 
INNER JOIN msdb.dbo.backupset ON msdb.dbo.backupmediafamily.media_set_id = msdb.dbo.backupset.media_set_id 
WHERE (CONVERT(datetime, msdb.dbo.backupset.backup_start_date, 102) >= GETDATE() - 7) 
ORDER BY 
msdb.dbo.backupset.database_name, 
msdb.dbo.backupset.backup_finish_date;


----------------------------------------------------------Jobs that failed in the last 24 hours

print N'Jobs Failing in last 24 hours';

----------------------------------------------------------Variable Declarations

DECLARE @PreviousDate datetime
DECLARE @Year VARCHAR(4)
DECLARE @Month VARCHAR(2)
DECLARE @MonthPre VARCHAR(2)
DECLARE @Day VARCHAR(2)
DECLARE @DayPre VARCHAR(2)
DECLARE @FinalDate INT

----------------------------------------------------------Initialize Variables

-- Build yesterday's date as an integer in YYYYMMDD form, because
-- sysjobhistory.run_date stores the run date as an int, not a datetime.
SET @PreviousDate = DATEADD(dd, -1, GETDATE()) --Last 1 day
SET @Year = DATEPART(yyyy, @PreviousDate)
SELECT @MonthPre = CONVERT(VARCHAR(2), DATEPART(mm, @PreviousDate))
-- Adding 1000000000 and taking RIGHT(...,2) zero-pads the month number.
SELECT @Month = RIGHT(CONVERT(VARCHAR, (@MonthPre + 1000000000)),2)
SELECT @DayPre = CONVERT(VARCHAR(2), DATEPART(dd, @PreviousDate))
SELECT @Day = RIGHT(CONVERT(VARCHAR, (@DayPre + 1000000000)),2)
SET @FinalDate = CAST(@Year + @Month + @Day AS INT)
--Final Logic
-- Failed job steps (run_status 0) since yesterday, most recent first.
SELECT substring(j.[name],1,40) AS JOB,
substring(s.step_name,1,40) AS Step,
h.step_id,
substring(h.step_name,1,40) AS Step,
h.run_date,
h.run_time,
h.sql_severity,
substring(h.message,1,100) AS Message,
h.server
FROM msdb.dbo.sysjobhistory h
INNER JOIN msdb.dbo.sysjobs j
ON h.job_id = j.job_id
INNER JOIN msdb.dbo.sysjobsteps s
ON j.job_id = s.job_id AND h.step_id = s.step_id
WHERE h.run_status = 0 --Failure
AND h.run_date > @FinalDate
ORDER BY h.instance_id DESC;

----------------------------------------------------------Missing indexes

PRINT N'Missing Indexes';

-- Missing-index suggestions ranked by estimated impact
-- (query cost * user impact * number of seeks/scans that wanted the index).
SELECT SUBSTRING(so.name, 1, 40) AS Name,
       (avg_total_user_cost * avg_user_impact) * (user_seeks + user_scans) AS Impact,
       ddmid.equality_columns,
       ddmid.inequality_columns,
       ddmid.included_columns
FROM sys.dm_db_missing_index_group_stats AS ddmigs
INNER JOIN sys.dm_db_missing_index_groups AS ddmig
        ON ddmigs.group_handle = ddmig.index_group_handle
INNER JOIN sys.dm_db_missing_index_details AS ddmid
        ON ddmig.index_handle = ddmid.index_handle
INNER JOIN sys.objects AS so WITH (NOLOCK)
        ON ddmid.object_id = so.object_id
WHERE ddmigs.group_handle IN
      (
          SELECT TOP (5000) group_handle
          FROM sys.dm_db_missing_index_group_stats WITH (NOLOCK)
          ORDER BY (avg_total_user_cost * avg_user_impact) * (user_seeks + user_scans) DESC
      );

----------------------------------------------------------Duplicate indexes

print N'Duplicate Indexes';

-- Reports "included" indexes whose key column list is a leading prefix of
-- another ("including") index on the same table -- i.e. redundant indexes
-- that could likely be dropped, with row counts and sizes for context.
DECLARE @SCHEMANAME VARCHAR(30);
DECLARE @TABLENAME VARCHAR(127);
-- Bug fix: these filter patterns were declared but never assigned, so the
-- LIKE @TABLENAME / LIKE @SCHEMANAME predicates below compared against
-- NULL and the check silently returned no rows. Default to match-all;
-- narrow them here to focus on a specific schema or table.
SET @SCHEMANAME = '%';
SET @TABLENAME = '%';
WITH ind_list AS(
  select o.schema_id, i.object_id, i.index_id,
    i.name, i.type_desc,
    i.is_unique, i.is_primary_key, 
    -- Comma-separated key columns in index order (FOR XML PATH concat).
    STUFF( (SELECT ',' + tc.name
            FROM sys.index_columns ic
              JOIN sys.columns tc
               ON tc.column_id = ic.column_id AND
                  tc.object_id = ic.object_id
            WHERE ic.object_id = i.object_id AND
                  ic.index_id = i.index_id 
              AND ic.is_included_column = 0
            ORDER BY ic.index_column_id
            FOR XML PATH ('') ),1,1,'' ) index_columns,
    -- Comma-separated INCLUDE columns.
    STUFF( (SELECT ',' + tc.name
            FROM sys.index_columns ic
              JOIN sys.columns tc
               ON tc.column_id = ic.column_id AND
                  tc.object_id = ic.object_id
            WHERE ic.object_id = i.object_id AND
                  ic.index_id = i.index_id
               AND ic.is_included_column = 1
            ORDER BY ic.index_column_id
            FOR XML PATH ('') ),1,1,'' ) include_columns
  FROM sys.indexes i
    JOIN sys.objects o ON o.object_id = i.object_id
  WHERE i.index_id > 0 AND i.type_desc <> 'XML'
    AND object_name(i.object_id) LIKE @TABLENAME
    AND i.is_disabled = 0 
    AND schema_name(o.schema_id) LIKE @SCHEMANAME )
SELECT substring(schema_name(included_indexes.schema_id),1,30) AS owner, 
  object_name(included_indexes.object_id) table_name,
  -- index_id < 2 restricts the row count to the heap or clustered index.
  (SELECT SUM(st.row_count) FROM sys.dm_db_partition_stats st
   WHERE st.object_id = included_indexes.object_id
     AND st.index_id < 2 ) num_rows,
  included_indexes.name included_index_name, 
  included_indexes.index_columns included_index_columns, 
  included_indexes.include_columns
       included_index_include_columns,
  included_indexes.type_desc included_index_type, 
  included_indexes.is_unique included_index_uniqueness,
  included_indexes.is_primary_key included_index_PK,
  (SELECT SUM(a.total_pages) * 8 FROM sys.allocation_units a
    JOIN sys.partitions p ON a.container_id = p.partition_id
   WHERE p.object_id = included_indexes.object_id AND
     p.index_id = included_indexes.index_id
  ) included_index_size_kb,
  including_indexes.name including_index_name, 
  including_indexes.index_columns including_index_columns, 
  including_indexes.include_columns
       including_index_include_columns,
  including_indexes.type_desc including_index_type, 
  including_indexes.is_unique including_index_uniqueness,
  including_indexes.is_primary_key including_index_PK,
  (SELECT SUM(a.total_pages) * 8 FROM sys.allocation_units a
     JOIN sys.partitions p ON a.container_id = p.partition_id
   WHERE p.object_id = including_indexes.object_id AND
    p.index_id = including_indexes.index_id
  ) including_index_size_kb
FROM ind_list included_indexes
  JOIN ind_list including_indexes
    ON including_indexes.object_id = included_indexes.object_id
  JOIN sys.partitions ing_p 
   ON ing_p.object_id = including_indexes.object_id AND
      ing_p.index_id = including_indexes.index_id
  JOIN sys.allocation_units ing_a
   ON ing_a.container_id = ing_p.partition_id
-- The prefix test: the included index's key list plus a trailing comma
-- must match the start of the including index's key list.
WHERE including_indexes.index_id <> included_indexes.index_id
  AND LEN(included_indexes.index_columns) <=
      LEN(including_indexes.index_columns) 
  AND included_indexes.index_columns + ',' =
      SUBSTRING(including_indexes.index_columns,1,
              LEN(included_indexes.index_columns + ','))
ORDER BY 2 DESC;

----------------------------------------------------------High index fragmentation check 

print N'Index with HIGH Fragmentation';

-- Scan every database for indexes at or above 30% logical fragmentation.
-- LIMITED mode keeps the physical-stats scan cheap on large tables.
EXEC sp_MSforeachdb '
USE [?]
SELECT ''?'' AS DB_NAME,
       QUOTENAME(ix.name) AS [index_name],
       phys.*
FROM sys.dm_db_index_physical_stats (DB_ID(), NULL, NULL, NULL, ''LIMITED'') AS phys
     INNER JOIN sys.indexes ix
             ON phys.object_id = ix.object_id
            AND phys.index_id = ix.index_id
WHERE avg_fragmentation_in_percent >= 30
ORDER BY avg_fragmentation_in_percent DESC;
'

use master
GO


-----------------Tables that have not had a stats update in more than 30 days and have significant changes


-- Per user database: user tables whose statistics are older than 30 days
-- despite more than 1000 row modifications since the last update.
-- NOTE(review): stats that were never updated (last_updated IS NULL) are
-- excluded by the date predicate; add an IS NULL branch if those matter.
EXEC sp_MSforeachdb '
USE [?]
IF ''?'' NOT IN (''master'', ''model'', ''msdb'', ''tempdb'')
SELECT ''?'' AS DB_NAME, tab.name, tab.object_id, st.name, st.stats_id, last_updated, modification_counter
FROM sys.objects AS tab
     INNER JOIN sys.stats AS st ON st.object_id = tab.object_id
     CROSS APPLY sys.dm_db_stats_properties(st.object_id, st.stats_id) AS props
WHERE tab.type = ''U''
  AND last_updated < GETDATE() - 30
  AND modification_counter > 1000
'

----------------------------------------------------------Wait stats

print N'Wait Stats';

-- Cumulative instance-level waits since startup; only wait types that have
-- accumulated more than 10 seconds, worst offenders first.
SELECT *
FROM sys.dm_os_wait_stats
WHERE wait_time_ms > 10000
ORDER BY wait_time_ms DESC;


----------------------------------------------------------Users and roles

print N'Users and Roles';

-- Database principals and the roles they are members of.
-- FIX: the original read role membership from sys.sysmembers, a deprecated
-- SQL Server 2000 compatibility view slated for removal; the supported
-- catalog views sys.database_role_members / sys.database_principals are
-- used instead. Output columns are unchanged.
WITH Roles_CTE(Role_Name, Username)
AS
(
	SELECT 
		role_princ.name as [Role_Name],
		member_princ.name as [Username]
	FROM sys.database_role_members drm
		JOIN sys.database_principals role_princ
			ON role_princ.principal_id = drm.role_principal_id
		JOIN sys.database_principals member_princ
			ON member_princ.principal_id = drm.member_principal_id
)

SELECT  
    Roles_CTE.Role_Name,
    [DatabaseUserName] = princ.[name],
    [UserType] = CASE princ.[type]
                    WHEN 'S' THEN 'SQL User'
                    WHEN 'U' THEN 'Windows User'
                    WHEN 'G' THEN 'Windows Group'
                    WHEN 'A' THEN 'Application Role'
                    WHEN 'R' THEN 'Database Role'
                    WHEN 'C' THEN 'User mapped to a certificate'
                    WHEN 'K' THEN 'User mapped to an asymmetric key'
                 END
FROM 
    sys.database_principals princ 
JOIN Roles_CTE on Username = princ.name
where princ.type in ('S', 'U', 'G', 'A', 'R', 'C', 'K')
ORDER BY princ.name;


----------------------------------------------------------Job information

print N'Job Information';


-- SQL Server Agent job inventory: name, category, owner, enabled/scheduled
-- state, a human-readable schedule description, the average historical run
-- duration in seconds, and the next scheduled run time.
-- NOTE(review): the WITh(NOLOCK) hints (odd casing, but T-SQL keywords are
-- case-insensitive) allow dirty reads of msdb tables — acceptable for an
-- informational report, but rows may reflect uncommitted changes.
SELECT	 [JobName] = [jobs].[name]
		,[Category] = [categories].[name]
		,[Owner] = SUSER_SNAME([jobs].[owner_sid])
		,[Enabled] = CASE [jobs].[enabled] WHEN 1 THEN 'Yes' ELSE 'No' END
		,[Scheduled] = CASE [schedule].[enabled] WHEN 1 THEN 'Yes' ELSE 'No' END
		,[Description] = [jobs].[description]
		-- sysschedules.freq_type encodes the schedule category.
		,[Occurs] = 
				CASE [schedule].[freq_type]
					WHEN   1 THEN 'Once'
					WHEN   4 THEN 'Daily'
					WHEN   8 THEN 'Weekly'
					WHEN  16 THEN 'Monthly'
					WHEN  32 THEN 'Monthly relative'
					WHEN  64 THEN 'When SQL Server Agent starts'
					WHEN 128 THEN 'Start whenever the CPU(s) become idle' 
					ELSE ''
				END
		-- For weekly schedules freq_interval is a day-of-week bitmask
		-- (1=Sunday, 2=Monday, 4=Tuesday ... 64=Saturday); the LEFT/LEN pair
		-- trims the trailing ", " from the concatenated day list.
		,[Occurs_detail] = 
				CASE [schedule].[freq_type]
					WHEN   1 THEN 'O'
					WHEN   4 THEN 'Every ' + CONVERT(VARCHAR, [schedule].[freq_interval]) + ' day(s)'
					WHEN   8 THEN 'Every ' + CONVERT(VARCHAR, [schedule].[freq_recurrence_factor]) + ' weeks(s) on ' + 
						LEFT(
							CASE WHEN [schedule].[freq_interval] &  1 =  1 THEN 'Sunday, '    ELSE '' END + 
							CASE WHEN [schedule].[freq_interval] &  2 =  2 THEN 'Monday, '    ELSE '' END + 
							CASE WHEN [schedule].[freq_interval] &  4 =  4 THEN 'Tuesday, '   ELSE '' END + 
							CASE WHEN [schedule].[freq_interval] &  8 =  8 THEN 'Wednesday, ' ELSE '' END + 
							CASE WHEN [schedule].[freq_interval] & 16 = 16 THEN 'Thursday, '  ELSE '' END + 
							CASE WHEN [schedule].[freq_interval] & 32 = 32 THEN 'Friday, '    ELSE '' END + 
							CASE WHEN [schedule].[freq_interval] & 64 = 64 THEN 'Saturday, '  ELSE '' END , 
							LEN(
								CASE WHEN [schedule].[freq_interval] &  1 =  1 THEN 'Sunday, '    ELSE '' END + 
								CASE WHEN [schedule].[freq_interval] &  2 =  2 THEN 'Monday, '    ELSE '' END + 
								CASE WHEN [schedule].[freq_interval] &  4 =  4 THEN 'Tuesday, '   ELSE '' END + 
								CASE WHEN [schedule].[freq_interval] &  8 =  8 THEN 'Wednesday, ' ELSE '' END + 
								CASE WHEN [schedule].[freq_interval] & 16 = 16 THEN 'Thursday, '  ELSE '' END + 
								CASE WHEN [schedule].[freq_interval] & 32 = 32 THEN 'Friday, '    ELSE '' END + 
								CASE WHEN [schedule].[freq_interval] & 64 = 64 THEN 'Saturday, '  ELSE '' END 
							) - 1
						)
					WHEN  16 THEN 'Day ' + CONVERT(VARCHAR, [schedule].[freq_interval]) + ' of every ' + CONVERT(VARCHAR, [schedule].[freq_recurrence_factor]) + ' month(s)'
					WHEN  32 THEN 'The ' + 
							CASE [schedule].[freq_relative_interval]
								WHEN  1 THEN 'First'
								WHEN  2 THEN 'Second'
								WHEN  4 THEN 'Third'
								WHEN  8 THEN 'Fourth'
								WHEN 16 THEN 'Last' 
							END +
							CASE [schedule].[freq_interval]
								WHEN  1 THEN ' Sunday'
								WHEN  2 THEN ' Monday'
								WHEN  3 THEN ' Tuesday'
								WHEN  4 THEN ' Wednesday'
								WHEN  5 THEN ' Thursday'
								WHEN  6 THEN ' Friday'
								WHEN  7 THEN ' Saturday'
								WHEN  8 THEN ' Day'
								WHEN  9 THEN ' Weekday'
								WHEN 10 THEN ' Weekend Day' 
							END + ' of every ' + CONVERT(VARCHAR, [schedule].[freq_recurrence_factor]) + ' month(s)' 
					ELSE ''
				END
		-- freq_subday_type: 1 = once at a fixed time, 2/4/8 = every N
		-- seconds/minutes/hours; active_start/end_time are HHMMSS-encoded
		-- integers, reformatted to hh:mm:ss by the nested STUFF calls.
		,[Frequency] = 
				CASE [schedule].[freq_subday_type]
					WHEN 1 THEN 'Occurs once at ' + 
								STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [schedule].[active_start_time]), 6), 5, 0, ':'), 3, 0, ':')
					WHEN 2 THEN 'Occurs every ' + 
								CONVERT(VARCHAR, [schedule].[freq_subday_interval]) + ' Seconds(s) between ' + 
								STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [schedule].[active_start_time]), 6), 5, 0, ':'), 3, 0, ':') + ' and ' + 
								STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [schedule].[active_end_time]), 6), 5, 0, ':'), 3, 0, ':')
					WHEN 4 THEN 'Occurs every ' + 
								CONVERT(VARCHAR, [schedule].[freq_subday_interval]) + ' Minute(s) between ' + 
								STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [schedule].[active_start_time]), 6), 5, 0, ':'), 3, 0, ':') + ' and ' + 
								STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [schedule].[active_end_time]), 6), 5, 0, ':'), 3, 0, ':')
					WHEN 8 THEN 'Occurs every ' + 
								CONVERT(VARCHAR, [schedule].[freq_subday_interval]) + ' Hour(s) between ' + 
								STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [schedule].[active_start_time]), 6), 5, 0, ':'), 3, 0, ':') + ' and ' + 
								STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [schedule].[active_end_time]), 6), 5, 0, ':'), 3, 0, ':')
					ELSE ''
				END
		,[AvgDurationInSec] = CONVERT(DECIMAL(10, 2), [jobhistory].[AvgDuration])
		-- next_run_date/next_run_time are stored as integers (YYYYMMDD and
		-- HHMMSS); 0 means no pending scheduled run, rendered as 1900/1/1.
		,[Next_Run_Date] = 
				CASE [jobschedule].[next_run_date]
					WHEN 0 THEN CONVERT(DATETIME, '1900/1/1')
					ELSE CONVERT(DATETIME, CONVERT(CHAR(8), [jobschedule].[next_run_date], 112) + ' ' + 
						 STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), [jobschedule].[next_run_time]), 6), 5, 0, ':'), 3, 0, ':'))
				END
FROM	 [msdb].[dbo].[sysjobs] AS [jobs] WITh(NOLOCK) 
		 LEFT OUTER JOIN [msdb].[dbo].[sysjobschedules] AS [jobschedule] WITh(NOLOCK) 
				 ON [jobs].[job_id] = [jobschedule].[job_id] 
		 LEFT OUTER JOIN [msdb].[dbo].[sysschedules] AS [schedule] WITh(NOLOCK) 
				 ON [jobschedule].[schedule_id] = [schedule].[schedule_id] 
		 INNER JOIN [msdb].[dbo].[syscategories] [categories] WITh(NOLOCK) 
				 ON [jobs].[category_id] = [categories].[category_id] 
		 -- sysjobhistory.run_duration is an HHMMSS-encoded integer; the
		 -- expression converts it to seconds before averaging. step_id = 0
		 -- is the job-outcome row (one per execution).
		 LEFT OUTER JOIN 
					(	SELECT	 [job_id], [AvgDuration] = (SUM((([run_duration] / 10000 * 3600) + 
																(([run_duration] % 10000) / 100 * 60) + 
																 ([run_duration] % 10000) % 100)) * 1.0) / COUNT([job_id])
						FROM	 [msdb].[dbo].[sysjobhistory] WITh(NOLOCK)
						WHERE	 [step_id] = 0 
						GROUP BY [job_id]
					 ) AS [jobhistory] 
				 ON [jobhistory].[job_id] = [jobs].[job_id];


----------------------------------------------------------Existing linked server listing


print N'Linked Server Information';


-- Instances that expose sys.servers (2005+) get the detailed join; older
-- ones fall back to the sp_linkedservers procedure.
if exists (select 1 from sys.all_objects where name = 'Servers')
SELECT * 
FROM sys.Servers a
LEFT OUTER JOIN sys.linked_logins b
	ON b.server_id = a.server_id
LEFT OUTER JOIN sys.server_principals c
	ON c.principal_id = b.local_principal_id
where a.server_id <> 0;	-- server_id 0 is the local server itself
else
exec sp_linkedservers;


----------------------------------------------------------User statistics identification

print N'User statistics';

-- Statistics created explicitly by users (CREATE STATISTICS), excluding
-- auto-created stats and everything in the sys schema.
SELECT st.name AS STATS_NAME,
       SCHEMA_NAME(o.schema_id) AS SCHEMA_NAME,
       OBJECT_NAME(st.object_id) AS TABLE_NAME
FROM sys.stats AS st
     INNER JOIN sys.objects AS o ON o.object_id = st.object_id
WHERE SCHEMA_NAME(o.schema_id) <> 'sys'
  AND st.auto_created = 0
  AND st.user_created = 1;





Loading CSV data files into MySQL 8 – manual method

In my earlier MySQL 8 post here, I detailed the process for creating a MySQL 8 test environment.  If you have the MySQL 8 environment or some other newly-created MySQL 8 environment, you probably need to create some data in it now.  I wanted to load a decent amount of table data and aside from creating a simulated data set, loading a CSV file seemed to be a convenient method.  I chose to load a census data file in CSV format from https://data.census.gov/cedsci/.  This data is freely available, sizeable, and lends itself well to analytical queries later on.

Reviewing the process to load the data, many of you will notice the similarities between this process and the Oracle database external file loading process.  However, there is a gotcha that I encountered in MySQL that I will outline here.

First, you need to review your CSV data file to design and create a target table in MySQL.  I won’t cover the basics of starting the mysqld service or logging into the MySQL database, but will only relay the commands I used to create my target table:

-- Target table for the census CSV load: one statement producing the same
-- columns, order, and types as the original CREATE-then-ALTER sequence.
CREATE TABLE census (
    id    INT,
    Year  INT,
    Age   INT,
    Ethic INT,
    Sex   VARCHAR(10),
    Area  VARCHAR(10)
);

After creating the target table, I copied the CSV data file to a location on my MySQL database server and ensured that it was readable by all parties.

I then tried to load the CSV data file, but encountered the gotcha.

mysql> LOAD DATA LOCAL INFILE '/home/jonadams/Data8277.csv' INTO TABLE census FIELDS TERMINATED BY ',' ENCLOSED BY '"' IGNORE 1 ROWS;
ERROR 3948 (42000): Loading local data is disabled; this must be enabled on both the client and server sides

Through trial and error, I found that this setting is disabled by default and that this next process was the easiest way to change it:

  • Make a backup of the MySQL configuration file.  In my case (RHEL7 and MySQL 8 Community Edition), this file is /etc/my.cnf.  I recommend this step before editing any configuration file.
  • Add local_infile = 1 to the [mysqld] section of the /etc/my.cnf file and restart the mysqld service.
  • Log into mysql and execute the following command to verify the changed setting.
 MySQL localhost:33060+ ssl sakila SQL > SHOW GLOBAL VARIABLES LIKE 'local_infile';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| local_infile | ON |
+---------------+-------+
1 row in set (0.0027 sec)
  • Now the kicker.  If you logged into MySQL using mysqlsh, you need to exit and login using mysql because you will need to use a command line argument that is not accepted by mysqlsh.
  • Here is the log of my session including the load command and results.  Keep in mind that you only need to use the IGNORE 1 ROWS clause if there is a header line in the CSV data file.
[root@localhost ~]# mysql -u root -p --local_infile=1
Enter password: 
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 10
Server version: 8.0.19 MySQL Community Server - GPL

Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> use sakila;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
mysql> LOAD DATA LOCAL INFILE '/home/jonadams/Data8277.csv' INTO TABLE census FIELDS TERMINATED BY ',' ENCLOSED BY '"' IGNORE 1 ROWS;

Query OK, 34959672 rows affected (2 min 59.56 sec)
Records: 34959672  Deleted: 0  Skipped: 0  Warnings: 0

If the results of the load process show warnings or errors, you can use the SHOW WARNINGS or SHOW ERRORS command to display the warning or errors messages.  Use the limit option to keep the number of warnings at a manageable level if there are hundreds or thousands.

mysql> show warnings limit 100;

You may be concerned that you are changing the default settings of MySQL that prevent the loading of local data files, but this process only works if you alter the default settings in the configuration file AND use the local_infile settings when logging into MySQL.

If you prefer a graphical method for this process, you can also load data into tables using the MySQL Workbench.   The workbench includes many useful options including performance monitoring, import and export of data, querying and script execution, and data modeling.

There is also a graphical interface for loading data files into MySQL called phpMyAdmin which is browser-based and requires the installation and configuration of PHP and HTTPD (Apache).

Python and cx_Oracle for Oracle database connections

In my latest adventures, I have been improving my Python skills in regards to running Oracle SQL and PL/SQL code.  Once I got the process down for basic connection and execution, a database migration including new LDAP connectivity threw me for a loop until I got it figured out.

First, a little background on cx_oracle.  cx_Oracle is a Python extension module that enables access to Oracle databases. It conforms to the Python database API 2.0 specification with a considerable number of additions and a couple of exclusions.  cx_Oracle 7 has been tested with Python version 2.7, and with versions 3.5 through 3.8. You can use cx_Oracle with Oracle 11.2, 12, 18 and 19 client libraries. Oracle’s standard client-server version functionality allows connection to both older and newer databases. For example Oracle 19c client libraries can connect to Oracle Database 11.2.

To check if you have the cx_oracle module installed in your current Python environment, you can simply create a Python script with the line import cx_Oracle and execute it with python <filename> or execute this command from the Python command line.  If there are no errors, then the module is likely installed.

If not, you can install it in Linux with the command with the pip (python installs packages) module:

python -m pip install cx_Oracle --upgrade

If you receive an error about the pip module being unavailable, you can install it with the command:

yum install python-pip

Now that we know what cx_oracle is for and how to install it, here is an example of using it to connect to an Oracle RAC system:

import sys
import os
import cx_Oracle

# Build a DSN for an Oracle RAC SCAN listener and run one statement.
# Placeholders in <angle brackets> must be replaced with real values.
dsnStr = cx_Oracle.makedsn('<SCAN_NAME>', '<LISTEN_PORT>', service_name='<SERVICE_NAME>')
con = cx_Oracle.connect(user='<USER>', password='<PWD>', dsn=dsnStr)
# FIX: the original read "cur.con.cursor()", which raises a NameError —
# a cursor must be obtained FROM the connection object.
cur = con.cursor()
sql = """
<SQL or PL/SQL to execute>
"""
cur.execute(sql)
cur.close()
con.close()


Seems pretty simple.  This changed when we tried to configure a connection similar to the one above for a database using LDAP authentication.  The new sqlnet.ora and ldap.ora files were in place, but every connection attempt resulted in an ORA-12154 error.  In my particular case, Oracle instant client 12.2 and (regular) client 18c were installed on the server where these scripts were executed.  Even placing the necessary .ora network files in both locations resulted in the same errors.

The python command print os.environ will give you a list of all the environment variables that are set in the current environment.  Note – be sure to use the import os command to import the proper libraries first.  This was an important finding for me since I was using the Jenkins development environment and the actual execution user was not something I could see.  Using this command, I found out the identity of the OS user whom the python scripts were executing under.  After getting access to the development server, this account did not have any of the Oracle environment settings associated with it such as ORACLE_HOME, TNS_ADMIN, or LD_LIBRARY_PATH.  My next step was to set these under the user account on the server.  Even so, the attempts to connect to the Oracle database resulted in the same ORA-12154 error.  As a curious side, the print os.environ command did not list these variables after setting them at the OS level for the user account.

Now what?

Well, if you could pull up the environment variables using os.environ, couldn’t you set them as well?  Setting the values of ORACLE_HOME, TNS_ADMIN, and LD_LIBRARY_PATH in the Python script turned out to be the answer and ensured I was pointing to the correct Oracle client binaries.  Here is the resulting code:

import sys
import os
import cx_Oracle

# Point the Oracle client libraries at the correct home, TNS/LDAP config,
# and shared libraries. These must be set before the first connect() call.
os.environ['ORACLE_HOME'] = '/opt/oracle/version'
os.environ['TNS_ADMIN'] = '/opt/oracle/version/network/admin'
os.environ['LD_LIBRARY_PATH'] = '/opt/oracle/version/lib'

# Classic user/password@ldap_designator connect string.
con = cx_Oracle.connect('{}/{}@{}'.format('<USER>','<PWD>','<LDAP_DESIGNATOR>'))
# FIX: the original read "cur.con.cursor()", which raises a NameError —
# a cursor must be obtained FROM the connection object.
cur = con.cursor()
sql = """
<SQL or PL/SQL to execute>
"""
cur.execute(sql)
cur.close()
con.close()

Also, while working to get the python script to recognize the Oracle environment variable settings at the OS level, I discovered the use of the Python subprocess module in regards to calling scripts at the OS level.  This was a work around that could take advantage of the OS account environment settings.  However, I was told that even if username and password credentials were masked as they were passed in, they could be displayed by executing ps -ef on the server.  I have not been able to verify this, but it could be a security issue if executing Python scripts against production databases.

import os 
import subprocess 

# Run an external script and capture its stdout. Arguments are passed as a
# list, so no shell quoting is involved.
# FIX: the original placeholders were malformed — '<PATH_AND_SCRIPT_NAME'
# and '<ARG12' were missing their closing angle brackets and the second
# argument was mis-numbered.
output = subprocess.check_output(['<PATH_AND_SCRIPT_NAME>', str('<ARG1>'), str('<ARG2>')]) 
print(output)

If the script called executes SQL or PL/SQL, it is a good idea to use the WHENEVER SQLERROR EXIT 1 clause to ensure that any errors cause a failure and are captured.

 

 

SQL Server Full Backup Verification

As database administrators, our first focus in our production environments should be backup and recovery.  I have mentioned this before in previous posts and it can never be said too much.  Sure, you can backup your database left and right, but unless you verify the backups with practice recovery or some other type of verification, your backup and recovery plan is unreliable.

SQL Server author Steve Rezhener (https://www.sqlservercentral.com/author/walkair) has just published a scripted backup verification process that I thought was interesting.  I tested this script on a SQL Server 2019 Developer Edition lab environment and wanted to pass this process on along with my comments.

This verification script performs the following actions:

  1. Gets the latest full backup of the user-designated database
  2. Checks if there is enough free space for the trial recovery of the user-designated database
  3. Restores the latest full backup into a copy database
  4. The copy database is checked for corruption
  5. The copy database is removed
  6. The check results are placed into a temporary table, published, then dropped

Here are the contents of the published script.

-- Full-backup verification (script published by Steve Rezhener): restores
-- the latest full backup of @DBName into a throwaway copy database, runs
-- DBCC CHECKDB on the copy, drops it, and reports the relevant error-log
-- rows via a temp table.
DECLARE @DBName   AS VARCHAR(100) = 'DemoDB'
DECLARE @SQLString NVARCHAR(500), @BackupFileName AS NVARCHAR(500)
DECLARE @ParmDefinition nvarchar(500)
DECLARE @Restore_DBName AS VARCHAR(100) = @DBName
DECLARE @DataFileSpaceTaken_GB AS INT, @FreeDriveSpace_GB AS INT
DECLARE @OriginalDataFileName AS VARCHAR(500), @OriginalLogFileName AS VARCHAR(500)-- just in case
-- Proceed only if the target database exists on this instance.
IF EXISTS(SELECT database_id FROM sys.databases WHERE name=@DBName)
BEGIN
--sys.databases
 
PRINT '-- 1. GET LATEST BACKUP FILE FOR A DATABASE--'
-- Latest full ('D') backup device, plus the current data/log file paths and
-- free space on the volume holding the data file.
-- NOTE(review): sys.master_files.size is in 8 KB pages, not bytes, so
-- size/1024/1024/1024 likely understates the space required — verify.
SELECT TOP 1 @BackupFileName = bakfams.Physical_Device_Name
--bakfams.Physical_Device_Name
, @DataFileSpaceTaken_GB = masfiles.size/1024/1024/1024
, @FreeDriveSpace_GB = osvols.available_bytes/1024/1024/1024
, @OriginalDataFileName = masfiles.physical_name
, @OriginalLogFileName = masfiles2.physical_name
FROM sys.databases AS dats
INNER JOIN msdb.dbo.backupset AS baks    ON baks.database_name = dats.name
INNER JOIN msdb.dbo.backupmediafamily AS bakfams ON baks.media_set_id = bakfams.media_set_id
INNER JOIN sys.master_files AS masfiles    ON masfiles.database_id = dats.database_id AND masfiles.type_desc = 'ROWS'
CROSS APPLY sys.dm_os_volume_stats(masfiles.database_id, masfiles.file_id) AS osvols
LEFT OUTER JOIN sys.master_files AS masfiles2  ON masfiles2.database_id = dats.database_id AND masfiles2.type_desc = 'LOG'
WHERE 1=1
AND dats.name = @DBName
AND baks.[type]='D'
ORDER BY baks.backup_finish_date DESC
PRINT @BackupFileName
 
PRINT '-- 2. CREATE DATABASE NAME TO RESTORE --'
-- Copy-database name is <DBName>_<Month><Day><Year>, e.g. DemoDB_November212019.
SET @Restore_DBName = @Restore_DBName + '_' + DATENAME(MONTH,GETDATE())
SET @Restore_DBName = @Restore_DBName + CONVERT(VARCHAR(2),DAY(GETDATE()))
SET @Restore_DBName = @Restore_DBName + CONVERT(VARCHAR(4),YEAR(GETDATE()))
PRINT @Restore_DBName

PRINT '-- 3. CHECK FREE DISKSPACE TO RESTORE THE DATABASE --'
PRINT @DataFileSpaceTaken_GB
PRINT @FreeDriveSpace_GB
-- Require twice the data-file size as headroom before restoring.
IF (@FreeDriveSpace_GB<@DataFileSpaceTaken_GB*2) BEGIN PRINT '-- not enough space --'; RETURN; END
 
PRINT '-- 4. RESTORE DB--'
-- NOTE(review): MOVE assumes the backup's logical file names are exactly
-- @DBName and @DBName + '_log' — confirm against the backup's file list.
SET @SQLString = 'RESTORE DATABASE [' + @Restore_DBName + ']'
SET @SQLString = @SQLString + ' FROM DISK = N''' + @BackupFileName + ''''
SET @SQLString = @SQLString + ' WITH FILE = 1,'
SET @SQLString = @SQLString + ' MOVE N''' + @DBName + ''''
SET @SQLString = @SQLString + ' TO N''' + REPLACE(@OriginalDataFileName,@DBName,@Restore_DBName) + ''''
SET @SQLString = @SQLString + ', MOVE N''' + @DBName + '_log'''
SET @SQLString = @SQLString + ' TO N''' + REPLACE(@OriginalLogFileName,@DBName,@Restore_DBName) + ''''
SET @SQLString = @SQLString + ', NOUNLOAD, REPLACE, STATS = 10'
PRINT @SQLString
EXECUTE sp_executesql @SQLString
--RETURN
 
PRINT '-- 5. CHECK RESTORED DATABASE--'
-- NOTE(review): the database name is not bracket-quoted here or in the DROP
-- below; a name requiring delimiters would break this dynamic SQL.
SET @SQLString = 'DBCC CHECKDB ('+ @Restore_DBName + ')'
SET @SQLString = @SQLString + ' WITH NO_INFOMSGS ' -- WITH TABLERESULTS
EXECUTE sp_executesql @SQLString
 
PRINT '-- 6. DROP RESTORED DATABASE--'
SET @SQLString = 'DROP DATABASE ' + @Restore_DBName
EXECUTE sp_executesql @SQLString
 
-- NOTE(review): the banner literal below starts with an em-dash ("—-7.")
-- rather than "--7." — cosmetic only, since it is inside a string.
PRINT '—-7. CREATE TEMP winlog TABLE --'
IF OBJECT_ID('tempdb..#winlog') !=0 DROP TABLE #winlog
CREATE TABLE #winlog
(
rowID  INT IDENTITY(1,1)
, LogDate  DATETIME
, ProcessInfo  VARCHAR(50)
, textRow  VARCHAR(4000)
)
PRINT  '-- 8. STORE DBCC CHECKDB RESULTS --'
-- Snapshot the current SQL Server error log into #winlog.
INSERT INTO #winlog(LogDate, ProcessInfo, textRow)
EXEC master.dbo.xp_readerrorlog
PRINT  '-- 9. LOCATE LAST/FIRST ROWID —-'
-- TOP 1 ... ORDER BY rowID DESC picks the MOST RECENT "Starting up
-- database '<copy>'." entry, i.e. where this verification run begins.
DECLARE @textRow nvarchar(500), @1stRowID AS INT = 0
SET @SQLString = 'SELECT TOP 1 @x1stRowID = rowID'
SET @SQLString = @SQLString + ' FROM #winlog'
SET @SQLString = @SQLString + ' WHERE textRow = ''Starting up database ''''' + @Restore_DBName + '''''.'''
SET @SQLString = @SQLString + ' ORDER BY rowID DESC'
SET @ParmDefinition = N'@x1stRowID AS INT OUTPUT';
EXECUTE sp_executesql @SQLString, @ParmDefinition, @x1stRowID = @1stRowID OUTPUT;
-- Build the display query: every log row from that point forward, newest first.
SET @SQLString = 'SELECT *'
SET @SQLString = @SQLString + ' FROM #winlog'
SET @SQLString = @SQLString + ' WHERE RowID >= @xRowID'
SET @SQLString = @SQLString + ' ORDER BY rowID DESC'
PRINT 'SELECT FROM WINLOG: ' + @SQLString
 
PRINT '-- 10. DISPLAY RESULTS--'
SET @ParmDefinition = N'@xRowID INT';
EXECUTE sp_executesql @SQLString, @ParmDefinition, @xRowID = @1stRowID
DROP TABLE #winlog
END


When executed in my lab environment, this is what I saw (the latest entries are first):

rowID LogDate ProcessInfo textRow
105 2019-11-21 07:37:28.030 spid56 DBCC CHECKDB (Hospital_November212019) WITH no_infomsgs 
executed by WIN-NVFHQDPGP9D\Administrator found 0 errors and repaired 0 errors. 
Elapsed time: 0 hours 0 minutes 0 seconds. Internal database snapshot has split 
point LSN = 00000022:00000b2f:0001 and first LSN = 00000022:00000b2d:0001. 

104 2019-11-21 07:37:27.810 Backup RESTORE DATABASE successfully processed 449 pages 
in 0.029 seconds (120.740 MB/sec).

103 2019-11-21 07:37:27.780 Backup Database was restored: Database: Hospital_November212019, 
creation date(time): 2019/01/03(14:07:43), first LSN: 34:2822:1, last LSN: 34:2825:1, 
number of dump devices: 1, device information: (FILE=1, TYPE=DISK: 
{'C:\Program Files\Microsoft SQL Server\MSSQL13.MSSQLSERVER\MSSQL\Backup\Hospital.bak'}). 
Informational message. No user action required.

102 2019-11-21 07:37:27.760 Backup Restore is complete on database 'Hospital_November212019'. The database 
is now available.

101 2019-11-21 07:37:27.620 spid56 Parallel redo is shutdown for database 'Hospital_November212019' 
with worker pool size [1].

100 2019-11-21 07:37:27.530 spid56 Parallel redo is started for database 'Hospital_November212019' 
with worker pool size [1].

99 2019-11-21 07:37:27.500 spid56 Starting up database 'Hospital_November212019'.



Note that this script only works for full and not incremental backups.  The script needs to be able to find at least one full backup or it will fail.

My own raspberry pi cloud

When I was first working with computers, I used to build and upgrade my own desktop PCs.  This was back when finished PCs were pricey and it was cheaper to build your own if you were willing to put in the time and effort.  There were times of frustration but ultimately a sense of accomplishment when you were finished.  A lot of knowledge of the inner workings of a PC as well as tuning tips and tricks were earned as well.

Many years later, hardware is still relatively cheap, but a finished PC is as well.  Laptops have also replaced desktops in many situations now that their pricing has become more affordable.  Now it is typically game players and hobbyists who still actually build desktops.

I gave up building my own desktops long ago, but I still have one old Gateway PC for playing old FPS games like Doom when I’m feeling nostalgic.  When it finally conks out, I will probably have to set up a virtual machine or two for this task.

I started playing with virtual machines back when I wanted to work with versions of Oracle when they were first introduced.  As any Oracle veteran knows, the AIX, Unix, and Linux versions would always be rolled out first and it would be much later when the Windows versions were introduced.  Not wanting to wait for the Windows versions, I started using VMWare and eventually VirtualBox to start working with the Unix versions.

Over the years, I have amassed quite a lot of VMs, not wanting to create them over and over to work with specific Oracle products and operating systems.  Colleagues have asked me for copies of some of these VMs from time to time and the size of them makes it difficult to post or send them.  This is also true for files, scripts, and toolkits that we as consultants carry around with us between client engagements.

The introduction of the Raspberry Pi has opened a new world of low-cost computing power that can be used in a variety of ways and I see new projects posted all the time – everything from weather stations to smart home devices.

I recently saw a project for a home cloud using a raspberry pi device that can be configured to upload and download files remotely and saw this as a way to post my VMs for my colleagues that were interested.  Since I have an old (version 2) raspberry pi laying around that I unsuccessfully tried to turn into a NAS, I thought it would be perfect for this use.

The process to create my own cloud was found here – https://pimylifeup.com/raspberry-pi-owncloud/ and the process allowed me to get my own cloud set up.

A word of caution, though.  Since this is not a highly-secured platform, do not place any sensitive files on this cloud.  If it is on your home network, it will require a port forward setting on your router to enable outside access.  On the positive side, you can have links secured with passwords for both folders and individual files to share with others.

Update 12/17/19

While having the personal cloud was great, there was a problem uploading large files to my cloud.  The files in question would be several GB in size and the upload process through the web interface just could not handle them.  I found a blog on manually adding files to owncloud (https://bartsimons.me/manually-add-files-to-owncloud/), but after adding the files, the occ scan command to add the new files to the owncloud database failed with the message “This version of ownCloud is not compatible with PHP 7.3 You are currently running PHP 7.3.0.”.  The next higher available version of owncloud, 10.3.2, seemed to have the same problem according to my online research.

At the time of creating this environment, owncloud was on version 10.2.14 and I had also installed PHP 7.3 as well as PHP 7.2, with 7.3 being the default version in use. Version 7.2 did not seem to have this problem on owncloud, so I had to alter the raspberry pi Debian environment to use PHP version 7.2 instead.

Logged into the raspberry pi as the default pi user, I started with the “which php” command to find where it was executing PHP from.  It pointed to /usr/bin/php, which then pointed to the link /etc/alternatives/php, and finally to /usr/bin/php7.3.

As PHP version 7.2 was also installed, the php7.2 executable was also located under /usr/bin.  I then removed the /etc/alternatives/php link with a simple rm command, then created a new link pointing to the 7.2 executable – “ln -s /usr/bin/php7.2 /etc/alternatives/php”

To test the process, I manually copied files to the external drive using this process:

  • Power down the raspberry pi, remove my external USB drive
  • Copy a folder into the files directory of the default user account on the USB drive.  The name of this user is the admin account that was set up during initial owncloud configuration.
  • Plug the USB drive back into the raspberry pi and power it up.
  • Log in as the pi user account, open a terminal, and go to /var/www/owncloud
  • Execute the scan command “sudo -u www-data php occ files:scan –all” (two hyphens before the all option)
  • At the end of the scan, the new files were visible through the owncloud web interface

Update 1/10/20

After finding the process above for manually adding files to the external drive, I thought about having the file scan process execute on boot up to save time when adding files.  I created a file under /usr/bin on my raspberry pi called file-scan with permissions 744 as root with the following two commands:

cd /var/www/owncloud

sudo -u www-data php occ files:scan --all

I then edited the /etc/rc.local file and added the line /usr/bin/file-scan and now the file scan process executes every time I boot up.

MySQL 8 test environment setup

Having just passed the newly-revised MySQL 2019 Implementation Specialist certification (https://www.youracclaim.com/badges/0f7ee5bd-3f91-4611-8b33-08861f67bb7d/linked_in), I thought I would pass on some of the MySQL information in the form of an introduction for those thinking about creating an environment for experimentation.

My test environment will be created on an Oracle Enterprise Linux (OEL) version 7 update 7 virtual machine running under Oracle VirtualBox version 6.0.10 because I like doing things as cheaply as possible.

There are numerous articles on creating OEL virtual machines, so let me just say that OEL is available freely through Oracle E-delivery after creating a free account and Oracle VirtualBox is free as well and runs quite well under Windows 10.  For the Mac users out there, you will have to find your own virtual machine hosting utility.  Sorry, Mac.

My OEL7 VM is a simple construct of 2 CPU cores, 5GB of memory, and a 100GB dynamically sized HDD.  You should not need that much space, but I tend to size mine large and let them grow if needed.

Once the OEL7 VM is created and running, it needs to be able to access the internet to download the MySQL repository package as well as the actual MySQL software during the installation process.  Also, if you are not installing the software as the root user, the account you will be using will need sudo privileges.

Inside the VM, go to https://dev.mysql.com/downloads/repo/yum/ and download the desired repository RPM package.  An Oracle web account will be needed prior to being able to download.  You can use the one you created for the E-delivery download earlier if necessary.  Install the package with the command: sudo rpm -Uvh <package_name>.

Execute the following command to check the available MySQL versions from the newly-installed repository:

[jonadams@localhost Downloads]$ sudo yum repolist all | grep mysql

mysql-cluster-7.5-community/x86_64 MySQL Cluster 7.5 Community disabled
mysql-cluster-7.5-community-source MySQL Cluster 7.5 Community - disabled
mysql-cluster-7.6-community/x86_64 MySQL Cluster 7.6 Community disabled
mysql-cluster-7.6-community-source MySQL Cluster 7.6 Community - disabled
mysql-cluster-8.0-community/x86_64 MySQL Cluster 8.0 Community disabled
mysql-cluster-8.0-community-source MySQL Cluster 8.0 Community - disabled
mysql-connectors-community/x86_64 MySQL Connectors Community enabled: 118
mysql-connectors-community-source MySQL Connectors Community - disabled
mysql-tools-community/x86_64 MySQL Tools Community enabled: 95
mysql-tools-community-source MySQL Tools Community - Sourc disabled
mysql-tools-preview/x86_64 MySQL Tools Preview disabled
mysql-tools-preview-source MySQL Tools Preview - Source disabled
mysql55-community/x86_64 MySQL 5.5 Community Server disabled
mysql55-community-source MySQL 5.5 Community Server - disabled
mysql56-community/x86_64 MySQL 5.6 Community Server disabled
mysql56-community-source MySQL 5.6 Community Server - disabled
mysql57-community/x86_64 MySQL 5.7 Community Server disabled
mysql57-community-source MySQL 5.7 Community Server - disabled
mysql80-community/x86_64 MySQL 8.0 Community Server enabled: 129
mysql80-community-source MySQL 8.0 Community Server - disabled

You can see which versions and tools are enabled. The enabled ones will typically be
the newest versions.  Install the enabled community server:

[jonadams@localhost Downloads]$ sudo yum install mysql-community-server -y
Loaded plugins: langpacks, ulninfo
mysql-connectors-community | 2.5 kB 00:00:00
mysql-tools-community | 2.5 kB 00:00:00
mysql80-community | 2.5 kB 00:00:00
ol7_UEKR5 | 2.5 kB 00:00:00
ol7_latest | 2.7 kB 00:00:00
(1/4): ol7_UEKR5/x86_64/updateinfo | 50 kB 00:00:00
(2/4): ol7_latest/x86_64/updateinfo | 1.1 MB 00:00:01
(3/4): ol7_UEKR5/x86_64/primary_db | 6.6 MB 00:00:01
(4/4): ol7_latest/x86_64/primary_db | 20 MB 00:00:02
Resolving Dependencies
--> Running transaction check
---> Package mysql-community-server.x86_64 0:8.0.17-1.el7 will be installed
--> Processing Dependency: mysql-community-common(x86-64) = 8.0.17-1.el7 for package: mysql-community-server-8.0.17-1.el7.x86_64
--> Processing Dependency: mysql-community-client(x86-64) >= 8.0.11 for package: mysql-community-server-8.0.17-1.el7.x86_64
--> Running transaction check
---> Package mysql-community-client.x86_64 0:8.0.17-1.el7 will be installed
--> Processing Dependency: mysql-community-libs(x86-64) >= 8.0.11 for package: mysql-community-client-8.0.17-1.el7.x86_64
---> Package mysql-community-common.x86_64 0:8.0.17-1.el7 will be installed
--> Running transaction check
---> Package mariadb-libs.x86_64 1:5.5.64-1.el7 will be obsoleted
--> Processing Dependency: libmysqlclient.so.18()(64bit) for package: 2:postfix-2.10.1-7.el7.x86_64
--> Processing Dependency: libmysqlclient.so.18(libmysqlclient_18)(64bit) for package: 2:postfix-2.10.1-7.el7.x86_64
---> Package mysql-community-libs.x86_64 0:8.0.17-1.el7 will be obsoleting
--> Running transaction check
---> Package mysql-community-libs-compat.x86_64 0:8.0.17-1.el7 will be obsoleting
--> Finished Dependency Resolution

Dependencies Resolved

=============================================================================================================================
Package Arch Version Repository Size
=============================================================================================================================
Installing:
mysql-community-libs x86_64 8.0.17-1.el7 mysql80-community 3.0 M
replacing mariadb-libs.x86_64 1:5.5.64-1.el7
mysql-community-libs-compat x86_64 8.0.17-1.el7 mysql80-community 2.1 M
replacing mariadb-libs.x86_64 1:5.5.64-1.el7
mysql-community-server x86_64 8.0.17-1.el7 mysql80-community 415 M
Installing for dependencies:
mysql-community-client x86_64 8.0.17-1.el7 mysql80-community 32 M
mysql-community-common x86_64 8.0.17-1.el7 mysql80-community 589 k

Transaction Summary
=============================================================================================================================
Install 3 Packages (+2 Dependent packages)

Total size: 453 M
Total download size: 415 M
Downloading packages:
No Presto metadata available for mysql80-community
warning: /var/cache/yum/x86_64/7Server/mysql80-community/packages/mysql-community-server-8.0.17-1.el7.x86_64.rpm: Header V3 DSA/SHA1 Signature, key ID 5072e1f5: NOKEY
Public key for mysql-community-server-8.0.17-1.el7.x86_64.rpm is not installed
mysql-community-server-8.0.17-1.el7.x86_64.rpm | 415 MB 00:01:24
Retrieving key from file:///etc/pki/rpm-gpg/RPM-GPG-KEY-mysql
Importing GPG key 0x5072E1F5:
Userid : "MySQL Release Engineering <mysql-build@oss.oracle.com>"
Fingerprint: a4a9 4068 76fc bd3c 4567 70c8 8c71 8d3b 5072 e1f5
Package : mysql80-community-release-el7-3.noarch (installed)
From : /etc/pki/rpm-gpg/RPM-GPG-KEY-mysql
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Warning: RPMDB altered outside of yum.
Installing : mysql-community-common-8.0.17-1.el7.x86_64 1/6
Installing : mysql-community-libs-8.0.17-1.el7.x86_64 2/6
Installing : mysql-community-client-8.0.17-1.el7.x86_64 3/6
Installing : mysql-community-server-8.0.17-1.el7.x86_64 4/6
Installing : mysql-community-libs-compat-8.0.17-1.el7.x86_64 5/6
Erasing : 1:mariadb-libs-5.5.64-1.el7.x86_64 6/6
Verifying : mysql-community-libs-8.0.17-1.el7.x86_64 1/6
Verifying : mysql-community-server-8.0.17-1.el7.x86_64 2/6
Verifying : mysql-community-common-8.0.17-1.el7.x86_64 3/6
Verifying : mysql-community-client-8.0.17-1.el7.x86_64 4/6
Verifying : mysql-community-libs-compat-8.0.17-1.el7.x86_64 5/6
Verifying : 1:mariadb-libs-5.5.64-1.el7.x86_64 6/6

Installed:
mysql-community-libs.x86_64 0:8.0.17-1.el7 mysql-community-libs-compat.x86_64 0:8.0.17-1.el7
mysql-community-server.x86_64 0:8.0.17-1.el7

Dependency Installed:
mysql-community-client.x86_64 0:8.0.17-1.el7 mysql-community-common.x86_64 0:8.0.17-1.el7

Replaced:
mariadb-libs.x86_64 1:5.5.64-1.el7

Complete!

Start the mysqld service and check its status:

 

[jonadams@localhost Downloads]$ sudo systemctl start mysqld.service

[jonadams@localhost Downloads]$ sudo systemctl status mysqld.service
● mysqld.service – MySQL Server
Loaded: loaded (/usr/lib/systemd/system/mysqld.service; enabled; vendor preset: disabled)
Active: active (running) since Mon 2019-09-16 13:42:19 MDT; 5min ago
Docs: man:mysqld(8)
http://dev.mysql.com/doc/refman/en/using-systemd.html
Process: 5098 ExecStartPre=/usr/bin/mysqld_pre_systemd (code=exited, status=0/SUCCESS)
Main PID: 5183 (mysqld)
Status: “Server is operational”
Tasks: 39
CGroup: /system.slice/mysqld.service
└─5183 /usr/sbin/mysqld

Sep 16 13:42:13 localhost.localdomain systemd[1]: Starting MySQL Server...
Sep 16 13:42:19 localhost.localdomain systemd[1]: Started MySQL Server.

Get the temporary root password created during the first startup of the MySQL server:

[jonadams@localhost Downloads]$ sudo grep 'temporary password' /var/log/mysqld.log
2019-09-16T19:42:15.986474Z 5 [Note] [MY-010454] [Server] A temporary password is generated for root@localhost: 3dh>C0N!<Tpg

Change the root password as soon as possible by logging in with the generated,
temporary password and set a custom password for the superuser account:

shell> mysql -uroot -p

mysql> ALTER USER 'root'@'localhost' IDENTIFIED BY 'SemperFidelis1!';
Query OK, 0 rows affected (0.01 sec)

Note – MySQL’s validate_password plugin is installed by default. This will require that passwords contain at least one upper case letter, one lower case letter, one digit, and one special character, and that the total password length is at least 8 characters.

[jonadams@localhost Downloads]$ mysql -uroot -p
Enter password:
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 9
Server version: 8.0.17

Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql>

mysql> quit
Bye

Stop the mysqld service and check the status

[jonadams@localhost tmp]$ sudo systemctl stop mysqld.service
[jonadams@localhost tmp]$ sudo systemctl status mysqld.service
● mysqld.service - MySQL Server
Loaded: loaded (/usr/lib/systemd/system/mysqld.service; enabled; vendor preset: disabled)
Active: inactive (dead) since Mon 2019-09-16 17:45:44 MDT; 7s ago
Docs: man:mysqld(8)
http://dev.mysql.com/doc/refman/en/using-systemd.html
Process: 5183 ExecStart=/usr/sbin/mysqld $MYSQLD_OPTS (code=exited, status=0/SUCCESS)
Process: 5098 ExecStartPre=/usr/bin/mysqld_pre_systemd (code=exited, status=0/SUCCESS)
Main PID: 5183 (code=exited, status=0/SUCCESS)
Status: "Server shutdown complete"

Sep 16 13:42:13 localhost.localdomain systemd[1]: Starting MySQL Server...
Sep 16 13:42:19 localhost.localdomain systemd[1]: Started MySQL Server.
Sep 16 17:45:42 localhost.localdomain systemd[1]: Stopping MySQL Server...
Sep 16 17:45:44 localhost.localdomain systemd[1]: Stopped MySQL Server.

Note that the log entries at the bottom of the status output still show the
original startup messages, so at first glance it may look like the service was
just started; the timestamps, however, confirm it was stopped about 4 hours
after it was started.

We now want to install the MySQL shell utility.  It allows you to execute commands in JavaScript, Python, or SQL without special considerations.

[jonadams@localhost lib]$ sudo yum install mysql-shell
Loaded plugins: langpacks, ulninfo
epel/x86_64/metalink | 11 kB 00:00:00
epel | 5.4 kB 00:00:00
mysql-connectors-community | 2.5 kB 00:00:00
mysql-tools-community | 2.5 kB 00:00:00
mysql80-community | 2.5 kB 00:00:00
ol7_UEKR5 | 2.5 kB 00:00:00
ol7_latest | 2.7 kB 00:00:00
(1/2): epel/x86_64/updateinfo | 1.0 MB 00:00:00
(2/2): epel/x86_64/primary_db | 6.8 MB 00:00:00
Resolving Dependencies
--> Running transaction check
---> Package mysql-shell.x86_64 0:8.0.17-1.el7 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

=============================================================================================================================
Package Arch Version Repository Size
=============================================================================================================================
Installing:
mysql-shell x86_64 8.0.17-1.el7 mysql-tools-community 15 M

Transaction Summary
=============================================================================================================================
Install 1 Package

Total download size: 15 M
Installed size: 50 M
Is this ok [y/d/N]: y
Downloading packages:
mysql-shell-8.0.17-1.el7.x86_64.rpm | 15 MB 00:00:02
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : mysql-shell-8.0.17-1.el7.x86_64 1/1
Verifying : mysql-shell-8.0.17-1.el7.x86_64 1/1

Installed:
mysql-shell.x86_64 0:8.0.17-1.el7

Complete!

Verify the MySQL shell installation:

[jonadams@localhost lib]$ mysqlsh
MySQL Shell 8.0.17

Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its affiliates.
Other names may be trademarks of their respective owners.

Type '\help' or '\?' for help; '\quit' to exit.
MySQL JS >

The default shell interface for the MySQL shell utility is JavaScript.  To switch between the different interfaces, use these commands.  The prompt will change to reflect the current interface.

\js for JavaScript

\sql for SQL

\py for Python

MySQL Shell comes with the option of automatically saving the history of commands, but it is disabled by default. It is a handy feature, so use this process to activate it.

Show settings and look for history.autoSave

MySQL JS > shell.options

MySQL JS > \option --persist history.autoSave=1

You can close and reopen the session and in the new one press the arrow up key to verify that the commands from previous sessions are available.

Switch to SQL mode and connect to the MySQL instance:

MySQL … JS > \sql

MySQL SQL > \c root@localhost
Creating a session to 'root@localhost'
Please provide the password for 'root@localhost': ***************
Save password for 'root@localhost'? [Y]es/[N]o/Ne[v]er (default No): Y
Fetching schema names for autocompletion... Press ^C to stop.
Your MySQL connection id is 8 (X protocol)
Server version: 8.0.17 MySQL Community Server - GPL
No default schema selected; type \use <schema> to set one.

Notice that I opted for MySQL shell to save my user account password.  Next time I login, I will not have to type in the password after the connect command.

Now you can submit the usual SQL commands

SQL > show databases;
+--------------------+
| Database |
+--------------------+
| information_schema |
| mysql |
| performance_schema |
| sys |
+--------------------+
4 rows in set (0.0006 sec)

Exit from MySQL Shell with “\q” or “\exit”

MySQL … JS > \q

Now that your test environment is set up, I recommend shutting it down and backing it up with the VirtualBox export tool in case your experimentation goes wrong.  It would also be a good time to review the MySQL 8 online documentation.  I recommend concentrating first on backup and recovery commands.

https://dev.mysql.com/doc/

Enjoy!

 

Creating SQL Server index fragmentation

In a few client environments, I have seen index monitoring and maintenance procedures triggered by fragmentation levels.  I have sometimes wondered how these procedures were tested when they were put in place, other than waiting for fragmentation to occur through normal operations.  Wouldn’t it be a good idea to have a process that generates a level of fragmentation to test these processes?

Here is a process that can be used as the basis for generating index fragmentation to properly test index monitoring and maintenance procedures.  I will create a test table, populate it with data, create an index on the test table, then execute a process that keeps inserting data into the test table until the desired level of index fragmentation is reached.

During testing, 100 rows were inserted into the test table initially, then each iteration of the fragmentation process inserted another 100 rows.  It took only two iterations through the process to produce 50 percent index fragmentation.  For more precise fragmentation, use a larger number of rows during the initial table population routine or fewer rows during the data insert.

NOTE – This process was created and tested on SQL Server 2016.  Be sure to set the database environment where you want to create the test table before executing these commands.

--Drop the test table if it exists.
--The original plain DROP TABLE raises an error when the table is absent;
--DROP TABLE IF EXISTS (available since SQL Server 2016, the version this
--process was written and tested on) makes the script re-runnable.
DROP TABLE IF EXISTS dbo.TestTable
GO

--Create the test table used for fragmentation generation
CREATE TABLE dbo.TestTable
(
	RowID int IDENTITY(1,1),
	MyKeyField VARCHAR(10) NOT NULL,
	MyDate DATETIME NOT NULL,
	MyInt INT NOT NULL,          --was DATETIME: mismatched with the column name and the integer values inserted into it
	MyString VARCHAR(30) NOT NULL
)
GO

--Populate the test table with an initial batch of rows containing a
--zero-padded key, a random date within the past two years, a random
--integer, and a 30-character random string.
DECLARE @RowCount INT
DECLARE @RowString VARCHAR(10)
DECLARE @Random INT
DECLARE @Upper INT
DECLARE @Lower INT
DECLARE @InsertDate DATETIME
DECLARE @s CHAR(30)

--Random day offset range: between 730 days and 1 day in the past
SET @Lower = -730
SET @Upper = -1
SET @RowCount = 0

WHILE @RowCount < 100  --Set this value to the number of rows desired
BEGIN
	SET @RowString = CAST(@RowCount AS VARCHAR(10))
	--Random integer in the range [@Lower, @Upper]
	SELECT @Random = ROUND(((@Upper - @Lower -1) * RAND() + @Lower), 0)
	SET @InsertDate = DATEADD(dd, @Random, GETDATE())

	--Build a random 30-character string by shuffling the character pool
	--below (ORDER BY NEWID) and concatenating the top N picks via FOR XML
	SET @s = (
	SELECT
		c1 AS [text()]
	FROM
		(
		SELECT TOP (30) c1  --Change this value to the desired length of the resulting string.
		FROM
		  (
	    VALUES
	      ('A'), ('B'), ('C'), ('D'), ('E'), ('F'), ('G'), ('H'), ('I'), ('J'),
	      ('K'), ('L'), ('M'), ('N'), ('O'), ('P'), ('Q'), ('R'), ('S'), ('T'),
	      ('U'), ('V'), ('W'), ('X'), ('Y'), ('Z'), ('0'), ('1'), ('2'), ('3'),
	      ('4'), ('5'), ('6'), ('7'), ('8'), ('9'), ('!'), ('@'), ('#'), ('$'),
	      ('%'), ('&'), ('*'), ('('), (')'), ('?'), ('+'), ('-'), (','), ('.')
	        ) AS T1(c1)
		ORDER BY ABS(CHECKSUM(NEWID()))
		) AS T2
	FOR XML PATH('')
	);

	INSERT INTO dbo.TestTable
		(MyKeyField
		,MyDate
		,MyInt
		,MyString)
	VALUES
		(REPLICATE('0', 10 - DATALENGTH(@RowString)) + @RowString  --key zero-padded to 10 chars
		, @InsertDate
		,ABS(@Random)  --was RAND(@RowCount): re-seeding RAND with sequential seeds yields nearly identical values on every iteration
		,@s)

	SET @RowCount = @RowCount + 1
END
GO


--Verify number of row inserted into test table
select count(*)
from TestTable;


--Create a nonclustered index on the random-string column of the test table.
--The original index was literally named "dbo.TestTableIndex" -- index names
--are not schema-qualified, so the embedded "dbo." prefix was part of the name
--itself and misleading. Renamed per the <table>_<column>_idx convention.
SET ANSI_PADDING ON
GO

CREATE NONCLUSTERED INDEX TestTable_MyString_idx ON [dbo].[TestTable]
(
	[MyString] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, SORT_IN_TEMPDB = OFF, DROP_EXISTING = OFF, ONLINE = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON)
GO

--Check the initial fragmentation of the nonclustered index on the test table.
--OBJECT_ID('dbo.TestTable') is passed straight to the DMV instead of scanning
--all objects and filtering afterward; the result set is the same.
SELECT avg_fragmentation_in_percent
FROM sys.dm_db_index_physical_stats (DB_ID(), OBJECT_ID('dbo.TestTable'), NULL, NULL, 'LIMITED')
WHERE index_type_desc = 'NONCLUSTERED INDEX'
GO


--Routine to loop through row inserts until fragmentation reaches the desired
--level. IDENTITY_INSERT is enabled so existing RowID values can be duplicated,
--forcing page splits and therefore index fragmentation.
DECLARE @FragEnd INT
DECLARE @FragPercent INT

SET @FragEnd = 30      --Set this number to degree of fragmentation desired 
SET @FragPercent = 0  
SET IDENTITY_INSERT [dbo].[TestTable] ON

WHILE @FragEnd > @FragPercent
BEGIN

  --Current maximum fragmentation across the table's in-row allocation units
  select @FragPercent = max(avg_fragmentation_in_percent)
  from sys.dm_db_index_physical_stats (DB_ID(), NULL, NULL, NULL, 'LIMITED')
  where object_id = (select object_id from sys.tables where name = 'TestTable')
  AND alloc_unit_type_desc = 'IN_ROW_DATA';

  --Duplicate 100 existing rows (RowID included) to force page splits
  insert into [dbo].[TestTable] ( [RowID],[MyKeyField],[MyDate],[MyInt],[MyString])
  select top(100) [RowID],[MyKeyField],[MyDate],[MyInt],[MyString]
  from dbo.TestTable;

END;

--Fix: the original never turned IDENTITY_INSERT back off. The setting is
--session-scoped and only one table per session may have it ON, so leaving it
--enabled breaks any later identity insert in the same session.
SET IDENTITY_INSERT [dbo].[TestTable] OFF
GO